105 files changed, 6561 insertions, 4782 deletions
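The constraint macros this series adds to arch/x86/kernel/cpu/perf_event.h below (X86_ALL_EVENT_FLAGS, INTEL_FLAGS_EVENT_CONSTRAINT, INTEL_ALL_EVENT_CONSTRAINT, and the DATALA_* variants) all lean on one test in the x86 event scheduler: a constraint applies when the event's config, masked with the constraint's cmask, equals the constraint's code. Widening cmask with X86_ALL_EVENT_FLAGS pulls the modifier bits (edge, inv, cmask, any, pin control, and the Haswell in-TX bits) into that comparison. That is why the reworked PEBS tables can pair one dedicated entry for UOPS_RETIRED.ALL with inv=1, cmask=16 (the cycles:p encoding, 0x108001c2) with a catch-all INTEL_ALL_EVENT_CONSTRAINT(0, ...) that only admits events carrying no modifier bits. The user-space sketch below illustrates that match; the SEL_*/ALL_EVENT_FLAGS names are abbreviated stand-ins for the kernel's ARCH_PERFMON_EVENTSEL_* constants, not the kernel's identifiers, and the counter bitmask argument (the 0xf) is omitted:

#include <stdint.h>
#include <stdio.h>

/* Abbreviated stand-ins for the kernel's ARCH_PERFMON_EVENTSEL_* bits. */
#define SEL_EVENT	0x000000ffULL
#define SEL_UMASK	0x0000ff00ULL
#define SEL_EDGE	(1ULL << 18)
#define SEL_ANY		(1ULL << 21)
#define SEL_INV		(1ULL << 23)
#define SEL_CMASK	0xff000000ULL

#define ARCH_EVENT_MASK	(SEL_EVENT | SEL_UMASK)
/* The kernel's X86_ALL_EVENT_FLAGS also ORs in pin control and HSW_IN_TX*. */
#define ALL_EVENT_FLAGS	(SEL_EDGE | SEL_INV | SEL_CMASK | SEL_ANY)

struct constraint { uint64_t code, cmask; };

/* The scheduler's test: the masked config must equal the constraint code. */
static int constraint_matches(const struct constraint *c, uint64_t config)
{
	return (config & c->cmask) == c->code;
}

int main(void)
{
	/* INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf): event and flags match. */
	struct constraint cycles_p  = { 0x108001c2, ARCH_EVENT_MASK | ALL_EVENT_FLAGS };
	/* INTEL_ALL_EVENT_CONSTRAINT(0, 0xf): any event, but no flag bits set. */
	struct constraint catch_all = { 0, ALL_EVENT_FLAGS };

	/* UOPS_RETIRED.ALL (0x01c2) with inv=1, cmask=16: the cycles:p encoding. */
	uint64_t cfg = 0x01c2 | SEL_INV | (16ULL << 24);

	printf("cycles:p   -> dedicated=%d catch-all=%d\n",
	       constraint_matches(&cycles_p, cfg),
	       constraint_matches(&catch_all, cfg));
	printf("bare 0x1c2 -> catch-all=%d\n",
	       constraint_matches(&catch_all, 0x01c2));
	return 0;
}

Compiled standalone this prints dedicated=1 catch-all=0 for the cycles:p encoding and catch-all=1 for a bare 0x1c2, mirroring how flagged events are routed to their dedicated PEBS entries while unadorned ones fall through to the new catch-all.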
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 8249df45d2f2..8dfc9fd094a3 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -51,6 +51,14 @@ ARCH_PERFMON_EVENTSEL_EDGE | \ ARCH_PERFMON_EVENTSEL_INV | \ ARCH_PERFMON_EVENTSEL_CMASK) +#define X86_ALL_EVENT_FLAGS \ + (ARCH_PERFMON_EVENTSEL_EDGE | \ + ARCH_PERFMON_EVENTSEL_INV | \ + ARCH_PERFMON_EVENTSEL_CMASK | \ + ARCH_PERFMON_EVENTSEL_ANY | \ + ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \ + HSW_IN_TX | \ + HSW_IN_TX_CHECKPOINTED) #define AMD64_RAW_EVENT_MASK \ (X86_RAW_EVENT_MASK | \ AMD64_EVENTSEL_EVENT) diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 7fd54f09b011..7e1fd4e08552 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -36,7 +36,9 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o endif obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o endif diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2879ecdaac43..0646d3b63b9d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -387,7 +387,7 @@ int x86_pmu_hw_config(struct perf_event *event) precise++; /* Support for IP fixup */ - if (x86_pmu.lbr_nr) + if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) precise++; } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 8ade93111e03..fc5eb390b368 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -67,8 +67,10 @@ struct event_constraint { */ #define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */ #define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */ -#define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style st data sampling */ +#define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style datala, store */ #define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */ +#define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */ +#define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */ struct amd_nb { int nb_id; /* NorthBridge id */ @@ -252,18 +254,52 @@ struct cpu_hw_events { EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) #define INTEL_PLD_CONSTRAINT(c, n) \ - __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ + __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) #define INTEL_PST_CONSTRAINT(c, n) \ - __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ + __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) -/* DataLA version of store sampling without extra enable bit. */ -#define INTEL_PST_HSW_CONSTRAINT(c, n) \ - __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ +/* Event constraint, but match on all event flags too. 
*/ +#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) + +/* Check only flags, but allow all event/umask */ +#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \ + EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS) + +/* Check flags and event code, and set the HSW store flag */ +#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \ + __EVENT_CONSTRAINT(code, n, \ + ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) + +/* Check flags and event code, and set the HSW load flag */ +#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \ + __EVENT_CONSTRAINT(code, n, \ + ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) + +/* Check flags and event code/umask, and set the HSW store flag */ +#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \ + __EVENT_CONSTRAINT(code, n, \ + INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) +/* Check flags and event code/umask, and set the HSW load flag */ +#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \ + __EVENT_CONSTRAINT(code, n, \ + INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) + +/* Check flags and event code/umask, and set the HSW N/A flag */ +#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \ + __EVENT_CONSTRAINT(code, n, \ + INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW) + + /* * We define the end marker as having a weight of -1 * to enable blacklisting of events using a counter bitmask diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 2502d0d9d246..89bc750efccb 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2367,15 +2367,15 @@ __init int intel_pmu_init(void) * Install the hw-cache-events table: */ switch (boot_cpu_data.x86_model) { - case 14: /* 65 nm core solo/duo, "Yonah" */ + case 14: /* 65nm Core "Yonah" */ pr_cont("Core events, "); break; - case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ + case 15: /* 65nm Core2 "Merom" */ x86_add_quirk(intel_clovertown_quirk); - case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ - case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ - case 29: /* six-core 45 nm xeon "Dunnington" */ + case 22: /* 65nm Core2 "Merom-L" */ + case 23: /* 45nm Core2 "Penryn" */ + case 29: /* 45nm Core2 "Dunnington (MP)" */ memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2386,9 +2386,9 @@ __init int intel_pmu_init(void) pr_cont("Core2 events, "); break; - case 26: /* 45 nm nehalem, "Bloomfield" */ - case 30: /* 45 nm nehalem, "Lynnfield" */ - case 46: /* 45 nm nehalem-ex, "Beckton" */ + case 30: /* 45nm Nehalem */ + case 26: /* 45nm Nehalem-EP */ + case 46: /* 45nm Nehalem-EX */ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, @@ -2415,11 +2415,11 @@ __init int intel_pmu_init(void) pr_cont("Nehalem events, "); break; - case 28: /* Atom */ - case 38: /* Lincroft */ - case 39: /* Penwell */ - case 53: /* Cloverview */ - case 54: /* Cedarview */ + case 28: /* 45nm Atom "Pineview" */ + case 38: /* 45nm Atom "Lincroft" */ + case 39: /* 32nm Atom "Penwell" */ + case 53: /* 32nm Atom "Cloverview" */ + case 54: /* 32nm Atom "Cedarview" */ 
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2430,8 +2430,8 @@ __init int intel_pmu_init(void) pr_cont("Atom events, "); break; - case 55: /* Atom 22nm "Silvermont" */ - case 77: /* Avoton "Silvermont" */ + case 55: /* 22nm Atom "Silvermont" */ + case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, @@ -2446,9 +2446,9 @@ __init int intel_pmu_init(void) pr_cont("Silvermont events, "); break; - case 37: /* 32 nm nehalem, "Clarkdale" */ - case 44: /* 32 nm nehalem, "Gulftown" */ - case 47: /* 32 nm Xeon E7 */ + case 37: /* 32nm Westmere */ + case 44: /* 32nm Westmere-EP */ + case 47: /* 32nm Westmere-EX */ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, @@ -2474,8 +2474,8 @@ __init int intel_pmu_init(void) pr_cont("Westmere events, "); break; - case 42: /* SandyBridge */ - case 45: /* SandyBridge, "Romely-EP" */ + case 42: /* 32nm SandyBridge */ + case 45: /* 32nm SandyBridge-E/EN/EP */ x86_add_quirk(intel_sandybridge_quirk); memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2506,8 +2506,9 @@ __init int intel_pmu_init(void) pr_cont("SandyBridge events, "); break; - case 58: /* IvyBridge */ - case 62: /* IvyBridge EP */ + + case 58: /* 22nm IvyBridge */ + case 62: /* 22nm IvyBridge-EP/EX */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); /* dTLB-load-misses on IVB is different than SNB */ @@ -2539,11 +2540,11 @@ __init int intel_pmu_init(void) break; - case 60: /* Haswell Client */ - case 70: - case 71: + case 60: /* 22nm Haswell */ case 63: case 69: + case 70: + case 71: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -2552,7 +2553,7 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_hsw_event_constraints; x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; - x86_pmu.extra_regs = intel_snb_extra_regs; + x86_pmu.extra_regs = intel_snbep_extra_regs; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 696ade311ded..9dc419991772 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -108,14 +108,16 @@ static u64 precise_store_data(u64 status) return val; } -static u64 precise_store_data_hsw(struct perf_event *event, u64 status) +static u64 precise_datala_hsw(struct perf_event *event, u64 status) { union perf_mem_data_src dse; - u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK; - dse.val = 0; - dse.mem_op = PERF_MEM_OP_STORE; - dse.mem_lvl = PERF_MEM_LVL_NA; + dse.val = PERF_MEM_NA; + + if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) + dse.mem_op = PERF_MEM_OP_STORE; + else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) + dse.mem_op = PERF_MEM_OP_LOAD; /* * L1 info only valid for following events: @@ -125,15 +127,12 @@ static u64 precise_store_data_hsw(struct perf_event *event, u64 status) * MEM_UOPS_RETIRED.SPLIT_STORES * MEM_UOPS_RETIRED.ALL_STORES */ - if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0) - return dse.mem_lvl; - - if (status & 1) - 
dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; - else - dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; - - /* Nothing else supported. Sorry. */ + if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { + if (status & 1) + dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; + else + dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; + } return dse.val; } @@ -569,28 +568,10 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { }; struct event_constraint intel_slm_pebs_event_constraints[] = { - INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */ - INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */ - INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */ - INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */ - INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */ - INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */ - INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */ - INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */ - INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */ - INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */ - INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */ - INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */ - INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */ - INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */ - INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */ - INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */ - INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */ - INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */ - INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */ - INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */ - INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */ - INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */ + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + /* Allow all events as PEBS with no flags */ + INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END }; @@ -626,68 +607,44 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ - INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ - INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ - INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ - INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). 
*/ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + /* Allow all events as PEBS with no flags */ + INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), EVENT_CONSTRAINT_END }; struct event_constraint intel_ivb_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ - INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ - INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ - INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ - INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + /* Allow all events as PEBS with no flags */ + INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), EVENT_CONSTRAINT_END }; struct event_constraint intel_hsw_pebs_event_constraints[] = { INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ - INTEL_PST_HSW_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ - INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ - INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */ - INTEL_UEVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */ - INTEL_UEVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.NEAR_TAKEN */ - INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.* */ - /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ - INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), - /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ - INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), - INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ - INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ - /* MEM_UOPS_RETIRED.SPLIT_STORES */ - INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), - INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ - INTEL_PST_HSW_CONSTRAINT(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ - INTEL_UEVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */ - INTEL_UEVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */ - INTEL_UEVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L3_HIT */ - /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */ - INTEL_UEVENT_CONSTRAINT(0x40d1, 0xf), - /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */ - INTEL_UEVENT_CONSTRAINT(0x01d2, 0xf), - /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */ - INTEL_UEVENT_CONSTRAINT(0x02d2, 0xf), - /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM */ - INTEL_UEVENT_CONSTRAINT(0x01d3, 0xf), - INTEL_UEVENT_CONSTRAINT(0x04c8, 0xf), /* HLE_RETIRED.Abort */ - INTEL_UEVENT_CONSTRAINT(0x04c9, 0xf), /* RTM_RETIRED.Abort */ - + INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). 
*/ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ + /* Allow all events as PEBS with no flags */ + INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), EVENT_CONSTRAINT_END }; @@ -864,6 +821,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs) static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *__pebs) { +#define PERF_X86_EVENT_PEBS_HSW_PREC \ + (PERF_X86_EVENT_PEBS_ST_HSW | \ + PERF_X86_EVENT_PEBS_LD_HSW | \ + PERF_X86_EVENT_PEBS_NA_HSW) /* * We cast to the biggest pebs_record but are careful not to * unconditionally access the 'extra' entries. @@ -873,42 +834,40 @@ static void __intel_pmu_pebs_event(struct perf_event *event, struct perf_sample_data data; struct pt_regs regs; u64 sample_type; - int fll, fst; + int fll, fst, dsrc; + int fl = event->hw.flags; if (!intel_pmu_save_and_restart(event)) return; - fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; - fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST | - PERF_X86_EVENT_PEBS_ST_HSW); + sample_type = event->attr.sample_type; + dsrc = sample_type & PERF_SAMPLE_DATA_SRC; + + fll = fl & PERF_X86_EVENT_PEBS_LDLAT; + fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); perf_sample_data_init(&data, 0, event->hw.last_period); data.period = event->hw.last_period; - sample_type = event->attr.sample_type; /* - * if PEBS-LL or PreciseStore + * Use latency for weight (only avail with PEBS-LL) */ - if (fll || fst) { - /* - * Use latency for weight (only avail with PEBS-LL) - */ - if (fll && (sample_type & PERF_SAMPLE_WEIGHT)) - data.weight = pebs->lat; - - /* - * data.data_src encodes the data source - */ - if (sample_type & PERF_SAMPLE_DATA_SRC) { - if (fll) - data.data_src.val = load_latency_data(pebs->dse); - else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) - data.data_src.val = - precise_store_data_hsw(event, pebs->dse); - else - data.data_src.val = precise_store_data(pebs->dse); - } + if (fll && (sample_type & PERF_SAMPLE_WEIGHT)) + data.weight = pebs->lat; + + /* + * data.data_src encodes the data source + */ + if (dsrc) { + u64 val = PERF_MEM_NA; + if (fll) + val = load_latency_data(pebs->dse); + else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) + val = precise_datala_hsw(event, pebs->dse); + else if (fst) + val = precise_store_data(pebs->dse); + data.data_src.val = val; } /* @@ -935,16 +894,16 @@ static void __intel_pmu_pebs_event(struct perf_event *event, else regs.flags &= ~PERF_EFLAGS_EXACT; - if ((event->attr.sample_type & PERF_SAMPLE_ADDR) && + if ((sample_type & PERF_SAMPLE_ADDR) && 
x86_pmu.intel_cap.pebs_format >= 1) data.addr = pebs->dla; if (x86_pmu.intel_cap.pebs_format >= 2) { /* Only set the TSX weight when no memory weight. */ - if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll) + if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll) data.weight = intel_hsw_weight(pebs); - if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION) + if (sample_type & PERF_SAMPLE_TRANSACTION) data.txn = intel_hsw_transaction(pebs); } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 0939f86f543d..4785ee8ac599 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -1,83 +1,39 @@ #include "perf_event_intel_uncore.h" static struct intel_uncore_type *empty_uncore[] = { NULL, }; -static struct intel_uncore_type **msr_uncores = empty_uncore; -static struct intel_uncore_type **pci_uncores = empty_uncore; -/* pci bus to socket mapping */ -static int pcibus_to_physid[256] = { [0 ... 255] = -1, }; +struct intel_uncore_type **uncore_msr_uncores = empty_uncore; +struct intel_uncore_type **uncore_pci_uncores = empty_uncore; -static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; +static bool pcidrv_registered; +struct pci_driver *uncore_pci_driver; +/* pci bus to socket mapping */ +int uncore_pcibus_to_physid[256] = { [0 ... 255] = -1, }; +struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; static DEFINE_RAW_SPINLOCK(uncore_box_lock); - /* mask of cpus that collect uncore events */ static cpumask_t uncore_cpu_mask; /* constraint for the fixed counter */ -static struct event_constraint constraint_fixed = +static struct event_constraint uncore_constraint_fixed = EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); -static struct event_constraint constraint_empty = +struct event_constraint uncore_constraint_empty = EVENT_CONSTRAINT(0, 0, 0); -#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ - ((1ULL << (n)) - 1))) - -DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); -DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); -DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); -DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); -DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); -DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); -DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); -DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); -DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); -DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); -DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); -DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); -DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); -DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); -DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); -DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); -DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); -DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); -DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); -DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); -DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); -DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); -DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); 
-DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); -DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); -DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); -DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); -DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); -DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); -DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); -DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); -DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); -DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); -DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); -DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); -DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); -DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); -DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); -DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); -DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); -DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); - -static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); -static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); -static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); -static void uncore_pmu_event_read(struct perf_event *event); - -static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +ssize_t uncore_event_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) { return container_of(event->pmu, struct intel_uncore_pmu, pmu); } -static struct intel_uncore_box * -uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) +struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) { struct intel_uncore_box *box; @@ -98,7 +54,7 @@ uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) return *per_cpu_ptr(pmu->box, cpu); } -static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) +struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) { /* * perf core schedules event on the basis of cpu, uncore events are @@ -107,7 +63,7 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); } -static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) +u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) { u64 count; @@ -119,7 +75,7 @@ static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_eve /* * generic get constraint function for shared match/mask registers. 
*/ -static struct event_constraint * +struct event_constraint * uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) { struct intel_uncore_extra_reg *er; @@ -154,10 +110,10 @@ uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) return NULL; } - return &constraint_empty; + return &uncore_constraint_empty; } -static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) { struct intel_uncore_extra_reg *er; struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; @@ -178,7 +134,7 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even reg1->alloc = 0; } -static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) +u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) { struct intel_uncore_extra_reg *er; unsigned long flags; @@ -193,2936 +149,6 @@ static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) return config; } -/* Sandy Bridge-EP uncore support */ -static struct intel_uncore_type snbep_uncore_cbox; -static struct intel_uncore_type snbep_uncore_pcu; - -static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); - u32 config = 0; - - if (!pci_read_config_dword(pdev, box_ctl, &config)) { - config |= SNBEP_PMON_BOX_CTL_FRZ; - pci_write_config_dword(pdev, box_ctl, config); - } -} - -static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); - u32 config = 0; - - if (!pci_read_config_dword(pdev, box_ctl, &config)) { - config &= ~SNBEP_PMON_BOX_CTL_FRZ; - pci_write_config_dword(pdev, box_ctl, config); - } -} - -static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - - pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); -} - -static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - - pci_write_config_dword(pdev, hwc->config_base, hwc->config); -} - -static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - u64 count = 0; - - pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); - pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); - - return count; -} - -static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - - pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT); -} - -static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) -{ - u64 config; - unsigned msr; - - msr = uncore_msr_box_ctl(box); - if (msr) { - rdmsrl(msr, config); - config |= SNBEP_PMON_BOX_CTL_FRZ; - wrmsrl(msr, config); - } -} - -static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) -{ - u64 config; - unsigned msr; - - msr = uncore_msr_box_ctl(box); - if (msr) { - rdmsrl(msr, config); - config &= ~SNBEP_PMON_BOX_CTL_FRZ; - wrmsrl(msr, config); - } -} - -static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event 
*hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - - if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); - - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); -} - -static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - wrmsrl(hwc->config_base, hwc->config); -} - -static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) -{ - unsigned msr = uncore_msr_box_ctl(box); - - if (msr) - wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); -} - -static struct attribute *snbep_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - NULL, -}; - -static struct attribute *snbep_uncore_ubox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh5.attr, - NULL, -}; - -static struct attribute *snbep_uncore_cbox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_tid_en.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - &format_attr_filter_tid.attr, - &format_attr_filter_nid.attr, - &format_attr_filter_state.attr, - &format_attr_filter_opc.attr, - NULL, -}; - -static struct attribute *snbep_uncore_pcu_formats_attr[] = { - &format_attr_event_ext.attr, - &format_attr_occ_sel.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh5.attr, - &format_attr_occ_invert.attr, - &format_attr_occ_edge.attr, - &format_attr_filter_band0.attr, - &format_attr_filter_band1.attr, - &format_attr_filter_band2.attr, - &format_attr_filter_band3.attr, - NULL, -}; - -static struct attribute *snbep_uncore_qpi_formats_attr[] = { - &format_attr_event_ext.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - &format_attr_match_rds.attr, - &format_attr_match_rnid30.attr, - &format_attr_match_rnid4.attr, - &format_attr_match_dnid.attr, - &format_attr_match_mc.attr, - &format_attr_match_opc.attr, - &format_attr_match_vnw.attr, - &format_attr_match0.attr, - &format_attr_match1.attr, - &format_attr_mask_rds.attr, - &format_attr_mask_rnid30.attr, - &format_attr_mask_rnid4.attr, - &format_attr_mask_dnid.attr, - &format_attr_mask_mc.attr, - &format_attr_mask_opc.attr, - &format_attr_mask_vnw.attr, - &format_attr_mask0.attr, - &format_attr_mask1.attr, - NULL, -}; - -static struct uncore_event_desc snbep_uncore_imc_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), - INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), - INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), - { /* end: all zeroes */ }, -}; - -static struct uncore_event_desc snbep_uncore_qpi_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), - INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), - INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), - { /* end: all zeroes */ }, -}; - -static struct attribute_group snbep_uncore_format_group = { - .name = "format", - .attrs = snbep_uncore_formats_attr, -}; - -static struct attribute_group snbep_uncore_ubox_format_group = { - .name = "format", - .attrs = snbep_uncore_ubox_formats_attr, -}; - -static struct attribute_group snbep_uncore_cbox_format_group = { - .name = "format", - 
.attrs = snbep_uncore_cbox_formats_attr, -}; - -static struct attribute_group snbep_uncore_pcu_format_group = { - .name = "format", - .attrs = snbep_uncore_pcu_formats_attr, -}; - -static struct attribute_group snbep_uncore_qpi_format_group = { - .name = "format", - .attrs = snbep_uncore_qpi_formats_attr, -}; - -#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ - .init_box = snbep_uncore_msr_init_box, \ - .disable_box = snbep_uncore_msr_disable_box, \ - .enable_box = snbep_uncore_msr_enable_box, \ - .disable_event = snbep_uncore_msr_disable_event, \ - .enable_event = snbep_uncore_msr_enable_event, \ - .read_counter = uncore_msr_read_counter - -static struct intel_uncore_ops snbep_uncore_msr_ops = { - SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), -}; - -#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ - .init_box = snbep_uncore_pci_init_box, \ - .disable_box = snbep_uncore_pci_disable_box, \ - .enable_box = snbep_uncore_pci_enable_box, \ - .disable_event = snbep_uncore_pci_disable_event, \ - .read_counter = snbep_uncore_pci_read_counter - -static struct intel_uncore_ops snbep_uncore_pci_ops = { - SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), - .enable_event = snbep_uncore_pci_enable_event, \ -}; - -static struct event_constraint snbep_uncore_cbox_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x01, 0x1), - UNCORE_EVENT_CONSTRAINT(0x02, 0x3), - UNCORE_EVENT_CONSTRAINT(0x04, 0x3), - UNCORE_EVENT_CONSTRAINT(0x05, 0x3), - UNCORE_EVENT_CONSTRAINT(0x07, 0x3), - UNCORE_EVENT_CONSTRAINT(0x09, 0x3), - UNCORE_EVENT_CONSTRAINT(0x11, 0x1), - UNCORE_EVENT_CONSTRAINT(0x12, 0x3), - UNCORE_EVENT_CONSTRAINT(0x13, 0x3), - UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), - UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), - UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), - UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), - EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff), - UNCORE_EVENT_CONSTRAINT(0x21, 0x3), - UNCORE_EVENT_CONSTRAINT(0x23, 0x3), - UNCORE_EVENT_CONSTRAINT(0x31, 0x3), - UNCORE_EVENT_CONSTRAINT(0x32, 0x3), - UNCORE_EVENT_CONSTRAINT(0x33, 0x3), - UNCORE_EVENT_CONSTRAINT(0x34, 0x3), - UNCORE_EVENT_CONSTRAINT(0x35, 0x3), - UNCORE_EVENT_CONSTRAINT(0x36, 0x1), - UNCORE_EVENT_CONSTRAINT(0x37, 0x3), - UNCORE_EVENT_CONSTRAINT(0x38, 0x3), - UNCORE_EVENT_CONSTRAINT(0x39, 0x3), - UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), - EVENT_CONSTRAINT_END -}; - -static struct event_constraint snbep_uncore_r2pcie_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x10, 0x3), - UNCORE_EVENT_CONSTRAINT(0x11, 0x3), - UNCORE_EVENT_CONSTRAINT(0x12, 0x1), - UNCORE_EVENT_CONSTRAINT(0x23, 0x3), - UNCORE_EVENT_CONSTRAINT(0x24, 0x3), - UNCORE_EVENT_CONSTRAINT(0x25, 0x3), - UNCORE_EVENT_CONSTRAINT(0x26, 0x3), - UNCORE_EVENT_CONSTRAINT(0x32, 0x3), - UNCORE_EVENT_CONSTRAINT(0x33, 0x3), - UNCORE_EVENT_CONSTRAINT(0x34, 0x3), - EVENT_CONSTRAINT_END -}; - -static struct event_constraint snbep_uncore_r3qpi_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x10, 0x3), - UNCORE_EVENT_CONSTRAINT(0x11, 0x3), - UNCORE_EVENT_CONSTRAINT(0x12, 0x3), - UNCORE_EVENT_CONSTRAINT(0x13, 0x1), - UNCORE_EVENT_CONSTRAINT(0x20, 0x3), - UNCORE_EVENT_CONSTRAINT(0x21, 0x3), - UNCORE_EVENT_CONSTRAINT(0x22, 0x3), - UNCORE_EVENT_CONSTRAINT(0x23, 0x3), - UNCORE_EVENT_CONSTRAINT(0x24, 0x3), - UNCORE_EVENT_CONSTRAINT(0x25, 0x3), - UNCORE_EVENT_CONSTRAINT(0x26, 0x3), - UNCORE_EVENT_CONSTRAINT(0x28, 0x3), - UNCORE_EVENT_CONSTRAINT(0x29, 0x3), - UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), - UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), - UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), - UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), - UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), - UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), - 
UNCORE_EVENT_CONSTRAINT(0x30, 0x3), - UNCORE_EVENT_CONSTRAINT(0x31, 0x3), - UNCORE_EVENT_CONSTRAINT(0x32, 0x3), - UNCORE_EVENT_CONSTRAINT(0x33, 0x3), - UNCORE_EVENT_CONSTRAINT(0x34, 0x3), - UNCORE_EVENT_CONSTRAINT(0x36, 0x3), - UNCORE_EVENT_CONSTRAINT(0x37, 0x3), - UNCORE_EVENT_CONSTRAINT(0x38, 0x3), - UNCORE_EVENT_CONSTRAINT(0x39, 0x3), - EVENT_CONSTRAINT_END -}; - -static struct intel_uncore_type snbep_uncore_ubox = { - .name = "ubox", - .num_counters = 2, - .num_boxes = 1, - .perf_ctr_bits = 44, - .fixed_ctr_bits = 48, - .perf_ctr = SNBEP_U_MSR_PMON_CTR0, - .event_ctl = SNBEP_U_MSR_PMON_CTL0, - .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, - .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, - .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, - .ops = &snbep_uncore_msr_ops, - .format_group = &snbep_uncore_ubox_format_group, -}; - -static struct extra_reg snbep_uncore_cbox_extra_regs[] = { - SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, - SNBEP_CBO_PMON_CTL_TID_EN, 0x1), - SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), - SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), - SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), - SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), - SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), - SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), - SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), - SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), - SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), - SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2), - EVENT_EXTRA_END -}; - -static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct intel_uncore_extra_reg *er = &box->shared_regs[0]; - int i; - - if (uncore_box_is_fake(box)) - return; - - for (i = 0; i < 5; i++) { - if (reg1->alloc & (0x1 << i)) - atomic_sub(1 << (i * 6), &er->ref); - } - reg1->alloc = 0; -} - -static struct event_constraint * -__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, - u64 (*cbox_filter_mask)(int fields)) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct intel_uncore_extra_reg *er = &box->shared_regs[0]; - int i, alloc = 0; - unsigned long flags; - u64 mask; - - if (reg1->idx == EXTRA_REG_NONE) - return NULL; - - raw_spin_lock_irqsave(&er->lock, flags); - for (i = 0; i < 5; i++) { - if (!(reg1->idx & (0x1 << i))) - continue; - if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) - continue; - - mask = cbox_filter_mask(0x1 << i); - if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) || - !((reg1->config ^ er->config) & mask)) { - atomic_add(1 << (i * 6), &er->ref); - er->config &= ~mask; - er->config |= reg1->config & mask; - alloc |= (0x1 << i); - } else { - break; - } - } - raw_spin_unlock_irqrestore(&er->lock, flags); - if (i < 5) - goto fail; - - if (!uncore_box_is_fake(box)) - reg1->alloc |= alloc; - 
- return NULL; -fail: - for (; i >= 0; i--) { - if (alloc & (0x1 << i)) - atomic_sub(1 << (i * 6), &er->ref); - } - return &constraint_empty; -} - -static u64 snbep_cbox_filter_mask(int fields) -{ - u64 mask = 0; - - if (fields & 0x1) - mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID; - if (fields & 0x2) - mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID; - if (fields & 0x4) - mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE; - if (fields & 0x8) - mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC; - - return mask; -} - -static struct event_constraint * -snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); -} - -static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct extra_reg *er; - int idx = 0; - - for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) { - if (er->event != (event->hw.config & er->config_mask)) - continue; - idx |= er->idx; - } - - if (idx) { - reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + - SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; - reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); - reg1->idx = idx; - } - return 0; -} - -static struct intel_uncore_ops snbep_uncore_cbox_ops = { - SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), - .hw_config = snbep_cbox_hw_config, - .get_constraint = snbep_cbox_get_constraint, - .put_constraint = snbep_cbox_put_constraint, -}; - -static struct intel_uncore_type snbep_uncore_cbox = { - .name = "cbox", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 44, - .event_ctl = SNBEP_C0_MSR_PMON_CTL0, - .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, - .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, - .msr_offset = SNBEP_CBO_MSR_OFFSET, - .num_shared_regs = 1, - .constraints = snbep_uncore_cbox_constraints, - .ops = &snbep_uncore_cbox_ops, - .format_group = &snbep_uncore_cbox_format_group, -}; - -static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - u64 config = reg1->config; - - if (new_idx > reg1->idx) - config <<= 8 * (new_idx - reg1->idx); - else - config >>= 8 * (reg1->idx - new_idx); - - if (modify) { - hwc->config += new_idx - reg1->idx; - reg1->config = config; - reg1->idx = new_idx; - } - return config; -} - -static struct event_constraint * -snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct intel_uncore_extra_reg *er = &box->shared_regs[0]; - unsigned long flags; - int idx = reg1->idx; - u64 mask, config1 = reg1->config; - bool ok = false; - - if (reg1->idx == EXTRA_REG_NONE || - (!uncore_box_is_fake(box) && reg1->alloc)) - return NULL; -again: - mask = 0xffULL << (idx * 8); - raw_spin_lock_irqsave(&er->lock, flags); - if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || - !((config1 ^ er->config) & mask)) { - atomic_add(1 << (idx * 8), &er->ref); - er->config &= ~mask; - er->config |= config1 & mask; - ok = true; - } - raw_spin_unlock_irqrestore(&er->lock, flags); - - if (!ok) { - idx = (idx + 1) % 4; - if (idx != reg1->idx) { - config1 = snbep_pcu_alter_er(event, idx, false); - goto again; - } - return &constraint_empty; - } - - if (!uncore_box_is_fake(box)) { - if (idx != reg1->idx) - snbep_pcu_alter_er(event, idx, true); - reg1->alloc = 1; - } - return NULL; -} - -static void 
snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct intel_uncore_extra_reg *er = &box->shared_regs[0]; - - if (uncore_box_is_fake(box) || !reg1->alloc) - return; - - atomic_sub(1 << (reg1->idx * 8), &er->ref); - reg1->alloc = 0; -} - -static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; - - if (ev_sel >= 0xb && ev_sel <= 0xe) { - reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; - reg1->idx = ev_sel - 0xb; - reg1->config = event->attr.config1 & (0xff << reg1->idx); - } - return 0; -} - -static struct intel_uncore_ops snbep_uncore_pcu_ops = { - SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), - .hw_config = snbep_pcu_hw_config, - .get_constraint = snbep_pcu_get_constraint, - .put_constraint = snbep_pcu_put_constraint, -}; - -static struct intel_uncore_type snbep_uncore_pcu = { - .name = "pcu", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, - .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, - .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, - .num_shared_regs = 1, - .ops = &snbep_uncore_pcu_ops, - .format_group = &snbep_uncore_pcu_format_group, -}; - -static struct intel_uncore_type *snbep_msr_uncores[] = { - &snbep_uncore_ubox, - &snbep_uncore_cbox, - &snbep_uncore_pcu, - NULL, -}; - -enum { - SNBEP_PCI_QPI_PORT0_FILTER, - SNBEP_PCI_QPI_PORT1_FILTER, -}; - -static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - - if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { - reg1->idx = 0; - reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; - reg1->config = event->attr.config1; - reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; - reg2->config = event->attr.config2; - } - return 0; -} - -static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - - if (reg1->idx != EXTRA_REG_NONE) { - int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; - struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx]; - WARN_ON_ONCE(!filter_pdev); - if (filter_pdev) { - pci_write_config_dword(filter_pdev, reg1->reg, - (u32)reg1->config); - pci_write_config_dword(filter_pdev, reg1->reg + 4, - (u32)(reg1->config >> 32)); - pci_write_config_dword(filter_pdev, reg2->reg, - (u32)reg2->config); - pci_write_config_dword(filter_pdev, reg2->reg + 4, - (u32)(reg2->config >> 32)); - } - } - - pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); -} - -static struct intel_uncore_ops snbep_uncore_qpi_ops = { - SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), - .enable_event = snbep_qpi_enable_event, - .hw_config = snbep_qpi_hw_config, - .get_constraint = uncore_get_constraint, - .put_constraint = uncore_put_constraint, -}; - -#define SNBEP_UNCORE_PCI_COMMON_INIT() \ - .perf_ctr = SNBEP_PCI_PMON_CTR0, \ - .event_ctl = SNBEP_PCI_PMON_CTL0, \ - .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ - .ops = &snbep_uncore_pci_ops, \ - 
.format_group = &snbep_uncore_format_group - -static struct intel_uncore_type snbep_uncore_ha = { - .name = "ha", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type snbep_uncore_imc = { - .name = "imc", - .num_counters = 4, - .num_boxes = 4, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, - .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, - .event_descs = snbep_uncore_imc_events, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type snbep_uncore_qpi = { - .name = "qpi", - .num_counters = 4, - .num_boxes = 2, - .perf_ctr_bits = 48, - .perf_ctr = SNBEP_PCI_PMON_CTR0, - .event_ctl = SNBEP_PCI_PMON_CTL0, - .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, - .num_shared_regs = 1, - .ops = &snbep_uncore_qpi_ops, - .event_descs = snbep_uncore_qpi_events, - .format_group = &snbep_uncore_qpi_format_group, -}; - - -static struct intel_uncore_type snbep_uncore_r2pcie = { - .name = "r2pcie", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 44, - .constraints = snbep_uncore_r2pcie_constraints, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type snbep_uncore_r3qpi = { - .name = "r3qpi", - .num_counters = 3, - .num_boxes = 2, - .perf_ctr_bits = 44, - .constraints = snbep_uncore_r3qpi_constraints, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -enum { - SNBEP_PCI_UNCORE_HA, - SNBEP_PCI_UNCORE_IMC, - SNBEP_PCI_UNCORE_QPI, - SNBEP_PCI_UNCORE_R2PCIE, - SNBEP_PCI_UNCORE_R3QPI, -}; - -static struct intel_uncore_type *snbep_pci_uncores[] = { - [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha, - [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc, - [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi, - [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie, - [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi, - NULL, -}; - -static const struct pci_device_id snbep_uncore_pci_ids[] = { - { /* Home Agent */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), - }, - { /* MC Channel 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), - }, - { /* MC Channel 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), - }, - { /* MC Channel 2 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), - }, - { /* MC Channel 3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), - }, - { /* QPI Port 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), - }, - { /* QPI Port 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), - }, - { /* R2PCIe */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), - }, - { /* R3QPI Link 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), - }, - { /* R3QPI Link 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), - .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), - }, - { /* QPI Port 0 filter */ - 
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - SNBEP_PCI_QPI_PORT0_FILTER), - }, - { /* QPI Port 0 filter */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - SNBEP_PCI_QPI_PORT1_FILTER), - }, - { /* end: all zeroes */ } -}; - -static struct pci_driver snbep_uncore_pci_driver = { - .name = "snbep_uncore", - .id_table = snbep_uncore_pci_ids, -}; - -/* - * build pci bus to socket mapping - */ -static int snbep_pci2phy_map_init(int devid) -{ - struct pci_dev *ubox_dev = NULL; - int i, bus, nodeid; - int err = 0; - u32 config = 0; - - while (1) { - /* find the UBOX device */ - ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev); - if (!ubox_dev) - break; - bus = ubox_dev->bus->number; - /* get the Node ID of the local register */ - err = pci_read_config_dword(ubox_dev, 0x40, &config); - if (err) - break; - nodeid = config; - /* get the Node ID mapping */ - err = pci_read_config_dword(ubox_dev, 0x54, &config); - if (err) - break; - /* - * every three bits in the Node ID mapping register maps - * to a particular node. - */ - for (i = 0; i < 8; i++) { - if (nodeid == ((config >> (3 * i)) & 0x7)) { - pcibus_to_physid[bus] = i; - break; - } - } - } - - if (!err) { - /* - * For PCI bus with no UBOX device, find the next bus - * that has UBOX device and use its mapping. - */ - i = -1; - for (bus = 255; bus >= 0; bus--) { - if (pcibus_to_physid[bus] >= 0) - i = pcibus_to_physid[bus]; - else - pcibus_to_physid[bus] = i; - } - } - - if (ubox_dev) - pci_dev_put(ubox_dev); - - return err ? pcibios_err_to_errno(err) : 0; -} -/* end of Sandy Bridge-EP uncore support */ - -/* IvyTown uncore support */ -static void ivt_uncore_msr_init_box(struct intel_uncore_box *box) -{ - unsigned msr = uncore_msr_box_ctl(box); - if (msr) - wrmsrl(msr, IVT_PMON_BOX_CTL_INT); -} - -static void ivt_uncore_pci_init_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - - pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT); -} - -#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \ - .init_box = ivt_uncore_msr_init_box, \ - .disable_box = snbep_uncore_msr_disable_box, \ - .enable_box = snbep_uncore_msr_enable_box, \ - .disable_event = snbep_uncore_msr_disable_event, \ - .enable_event = snbep_uncore_msr_enable_event, \ - .read_counter = uncore_msr_read_counter - -static struct intel_uncore_ops ivt_uncore_msr_ops = { - IVT_UNCORE_MSR_OPS_COMMON_INIT(), -}; - -static struct intel_uncore_ops ivt_uncore_pci_ops = { - .init_box = ivt_uncore_pci_init_box, - .disable_box = snbep_uncore_pci_disable_box, - .enable_box = snbep_uncore_pci_enable_box, - .disable_event = snbep_uncore_pci_disable_event, - .enable_event = snbep_uncore_pci_enable_event, - .read_counter = snbep_uncore_pci_read_counter, -}; - -#define IVT_UNCORE_PCI_COMMON_INIT() \ - .perf_ctr = SNBEP_PCI_PMON_CTR0, \ - .event_ctl = SNBEP_PCI_PMON_CTL0, \ - .event_mask = IVT_PMON_RAW_EVENT_MASK, \ - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ - .ops = &ivt_uncore_pci_ops, \ - .format_group = &ivt_uncore_format_group - -static struct attribute *ivt_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - NULL, -}; - -static struct attribute *ivt_uncore_ubox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh5.attr, - NULL, -}; - -static 
struct attribute *ivt_uncore_cbox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_tid_en.attr, - &format_attr_thresh8.attr, - &format_attr_filter_tid.attr, - &format_attr_filter_link.attr, - &format_attr_filter_state2.attr, - &format_attr_filter_nid2.attr, - &format_attr_filter_opc2.attr, - NULL, -}; - -static struct attribute *ivt_uncore_pcu_formats_attr[] = { - &format_attr_event_ext.attr, - &format_attr_occ_sel.attr, - &format_attr_edge.attr, - &format_attr_thresh5.attr, - &format_attr_occ_invert.attr, - &format_attr_occ_edge.attr, - &format_attr_filter_band0.attr, - &format_attr_filter_band1.attr, - &format_attr_filter_band2.attr, - &format_attr_filter_band3.attr, - NULL, -}; - -static struct attribute *ivt_uncore_qpi_formats_attr[] = { - &format_attr_event_ext.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_thresh8.attr, - &format_attr_match_rds.attr, - &format_attr_match_rnid30.attr, - &format_attr_match_rnid4.attr, - &format_attr_match_dnid.attr, - &format_attr_match_mc.attr, - &format_attr_match_opc.attr, - &format_attr_match_vnw.attr, - &format_attr_match0.attr, - &format_attr_match1.attr, - &format_attr_mask_rds.attr, - &format_attr_mask_rnid30.attr, - &format_attr_mask_rnid4.attr, - &format_attr_mask_dnid.attr, - &format_attr_mask_mc.attr, - &format_attr_mask_opc.attr, - &format_attr_mask_vnw.attr, - &format_attr_mask0.attr, - &format_attr_mask1.attr, - NULL, -}; - -static struct attribute_group ivt_uncore_format_group = { - .name = "format", - .attrs = ivt_uncore_formats_attr, -}; - -static struct attribute_group ivt_uncore_ubox_format_group = { - .name = "format", - .attrs = ivt_uncore_ubox_formats_attr, -}; - -static struct attribute_group ivt_uncore_cbox_format_group = { - .name = "format", - .attrs = ivt_uncore_cbox_formats_attr, -}; - -static struct attribute_group ivt_uncore_pcu_format_group = { - .name = "format", - .attrs = ivt_uncore_pcu_formats_attr, -}; - -static struct attribute_group ivt_uncore_qpi_format_group = { - .name = "format", - .attrs = ivt_uncore_qpi_formats_attr, -}; - -static struct intel_uncore_type ivt_uncore_ubox = { - .name = "ubox", - .num_counters = 2, - .num_boxes = 1, - .perf_ctr_bits = 44, - .fixed_ctr_bits = 48, - .perf_ctr = SNBEP_U_MSR_PMON_CTR0, - .event_ctl = SNBEP_U_MSR_PMON_CTL0, - .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK, - .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, - .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, - .ops = &ivt_uncore_msr_ops, - .format_group = &ivt_uncore_ubox_format_group, -}; - -static struct extra_reg ivt_uncore_cbox_extra_regs[] = { - SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, - SNBEP_CBO_PMON_CTL_TID_EN, 0x1), - SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), - - SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), - SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), - 
SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), - SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), - SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), - EVENT_EXTRA_END -}; - -static u64 ivt_cbox_filter_mask(int fields) -{ - u64 mask = 0; - - if (fields & 0x1) - mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID; - if (fields & 0x2) - mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK; - if (fields & 0x4) - mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE; - if (fields & 0x8) - mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID; - if (fields & 0x10) - mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC; - - return mask; -} - -static struct event_constraint * -ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask); -} - -static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct extra_reg *er; - int idx = 0; - - for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) { - if (er->event != (event->hw.config & er->config_mask)) - continue; - idx |= er->idx; - } - - if (idx) { - reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + - SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; - reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx); - reg1->idx = idx; - } - return 0; -} - -static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - - if (reg1->idx != EXTRA_REG_NONE) { - u64 filter = uncore_shared_reg_config(box, 0); - wrmsrl(reg1->reg, filter & 0xffffffff); - wrmsrl(reg1->reg + 6, filter >> 32); - } - - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); -} - -static struct intel_uncore_ops ivt_uncore_cbox_ops = { - .init_box = ivt_uncore_msr_init_box, - .disable_box = snbep_uncore_msr_disable_box, - .enable_box = snbep_uncore_msr_enable_box, - .disable_event = snbep_uncore_msr_disable_event, - .enable_event = ivt_cbox_enable_event, - .read_counter = uncore_msr_read_counter, - .hw_config = ivt_cbox_hw_config, - .get_constraint = ivt_cbox_get_constraint, - .put_constraint = snbep_cbox_put_constraint, -}; - -static struct intel_uncore_type ivt_uncore_cbox = { - .name = "cbox", - .num_counters = 4, - .num_boxes = 15, - .perf_ctr_bits = 44, - .event_ctl = SNBEP_C0_MSR_PMON_CTL0, - .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, - .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, - .msr_offset = SNBEP_CBO_MSR_OFFSET, - .num_shared_regs = 1, - .constraints = snbep_uncore_cbox_constraints, - .ops = &ivt_uncore_cbox_ops, - .format_group = &ivt_uncore_cbox_format_group, -}; - -static struct intel_uncore_ops 
ivt_uncore_pcu_ops = { - IVT_UNCORE_MSR_OPS_COMMON_INIT(), - .hw_config = snbep_pcu_hw_config, - .get_constraint = snbep_pcu_get_constraint, - .put_constraint = snbep_pcu_put_constraint, -}; - -static struct intel_uncore_type ivt_uncore_pcu = { - .name = "pcu", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, - .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, - .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, - .num_shared_regs = 1, - .ops = &ivt_uncore_pcu_ops, - .format_group = &ivt_uncore_pcu_format_group, -}; - -static struct intel_uncore_type *ivt_msr_uncores[] = { - &ivt_uncore_ubox, - &ivt_uncore_cbox, - &ivt_uncore_pcu, - NULL, -}; - -static struct intel_uncore_type ivt_uncore_ha = { - .name = "ha", - .num_counters = 4, - .num_boxes = 2, - .perf_ctr_bits = 48, - IVT_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type ivt_uncore_imc = { - .name = "imc", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, - .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, - IVT_UNCORE_PCI_COMMON_INIT(), -}; - -/* registers in IRP boxes are not properly aligned */ -static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; -static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; - -static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - - pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], - hwc->config | SNBEP_PMON_CTL_EN); -} - -static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - - pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config); -} - -static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - u64 count = 0; - - pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count); - pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); - - return count; -} - -static struct intel_uncore_ops ivt_uncore_irp_ops = { - .init_box = ivt_uncore_pci_init_box, - .disable_box = snbep_uncore_pci_disable_box, - .enable_box = snbep_uncore_pci_enable_box, - .disable_event = ivt_uncore_irp_disable_event, - .enable_event = ivt_uncore_irp_enable_event, - .read_counter = ivt_uncore_irp_read_counter, -}; - -static struct intel_uncore_type ivt_uncore_irp = { - .name = "irp", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_mask = IVT_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, - .ops = &ivt_uncore_irp_ops, - .format_group = &ivt_uncore_format_group, -}; - -static struct intel_uncore_ops ivt_uncore_qpi_ops = { - .init_box = ivt_uncore_pci_init_box, - .disable_box = snbep_uncore_pci_disable_box, - .enable_box = snbep_uncore_pci_enable_box, - .disable_event = snbep_uncore_pci_disable_event, - .enable_event = snbep_qpi_enable_event, - .read_counter = snbep_uncore_pci_read_counter, - .hw_config = snbep_qpi_hw_config, - .get_constraint = uncore_get_constraint, - .put_constraint = uncore_put_constraint, -}; - -static struct intel_uncore_type ivt_uncore_qpi = { - .name = "qpi", - .num_counters = 4, - .num_boxes = 3, - .perf_ctr_bits = 48, - .perf_ctr = 
SNBEP_PCI_PMON_CTR0, - .event_ctl = SNBEP_PCI_PMON_CTL0, - .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, - .num_shared_regs = 1, - .ops = &ivt_uncore_qpi_ops, - .format_group = &ivt_uncore_qpi_format_group, -}; - -static struct intel_uncore_type ivt_uncore_r2pcie = { - .name = "r2pcie", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 44, - .constraints = snbep_uncore_r2pcie_constraints, - IVT_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type ivt_uncore_r3qpi = { - .name = "r3qpi", - .num_counters = 3, - .num_boxes = 2, - .perf_ctr_bits = 44, - .constraints = snbep_uncore_r3qpi_constraints, - IVT_UNCORE_PCI_COMMON_INIT(), -}; - -enum { - IVT_PCI_UNCORE_HA, - IVT_PCI_UNCORE_IMC, - IVT_PCI_UNCORE_IRP, - IVT_PCI_UNCORE_QPI, - IVT_PCI_UNCORE_R2PCIE, - IVT_PCI_UNCORE_R3QPI, -}; - -static struct intel_uncore_type *ivt_pci_uncores[] = { - [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha, - [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc, - [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp, - [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi, - [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie, - [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi, - NULL, -}; - -static const struct pci_device_id ivt_uncore_pci_ids[] = { - { /* Home Agent 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0), - }, - { /* Home Agent 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1), - }, - { /* MC0 Channel 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0), - }, - { /* MC0 Channel 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1), - }, - { /* MC0 Channel 3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2), - }, - { /* MC0 Channel 4 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3), - }, - { /* MC1 Channel 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4), - }, - { /* MC1 Channel 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5), - }, - { /* MC1 Channel 3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6), - }, - { /* MC1 Channel 4 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7), - }, - { /* IRP */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0), - }, - { /* QPI0 Port 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0), - }, - { /* QPI0 Port 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1), - }, - { /* QPI1 Port 2 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2), - }, - { /* R2PCIe */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0), - }, - { /* R3QPI0 Link 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0), - }, - { /* R3QPI0 Link 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), - .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1), - }, - { /* R3QPI1 Link 2 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), - .driver_data = 
UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2), - }, - { /* QPI Port 0 filter */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - SNBEP_PCI_QPI_PORT0_FILTER), - }, - { /* QPI Port 1 filter */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - SNBEP_PCI_QPI_PORT1_FILTER), - }, - { /* end: all zeroes */ } -}; - -static struct pci_driver ivt_uncore_pci_driver = { - .name = "ivt_uncore", - .id_table = ivt_uncore_pci_ids, -}; -/* end of IvyTown uncore support */ - -/* Sandy Bridge uncore support */ -static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); - else - wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); -} - -static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - wrmsrl(event->hw.config_base, 0); -} - -static void snb_uncore_msr_init_box(struct intel_uncore_box *box) -{ - if (box->pmu->pmu_idx == 0) { - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, - SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); - } -} - -static struct uncore_event_desc snb_uncore_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), - { /* end: all zeroes */ }, -}; - -static struct attribute *snb_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_cmask5.attr, - NULL, -}; - -static struct attribute_group snb_uncore_format_group = { - .name = "format", - .attrs = snb_uncore_formats_attr, -}; - -static struct intel_uncore_ops snb_uncore_msr_ops = { - .init_box = snb_uncore_msr_init_box, - .disable_event = snb_uncore_msr_disable_event, - .enable_event = snb_uncore_msr_enable_event, - .read_counter = uncore_msr_read_counter, -}; - -static struct event_constraint snb_uncore_cbox_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x80, 0x1), - UNCORE_EVENT_CONSTRAINT(0x83, 0x1), - EVENT_CONSTRAINT_END -}; - -static struct intel_uncore_type snb_uncore_cbox = { - .name = "cbox", - .num_counters = 2, - .num_boxes = 4, - .perf_ctr_bits = 44, - .fixed_ctr_bits = 48, - .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, - .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, - .fixed_ctr = SNB_UNC_FIXED_CTR, - .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, - .single_fixed = 1, - .event_mask = SNB_UNC_RAW_EVENT_MASK, - .msr_offset = SNB_UNC_CBO_MSR_OFFSET, - .constraints = snb_uncore_cbox_constraints, - .ops = &snb_uncore_msr_ops, - .format_group = &snb_uncore_format_group, - .event_descs = snb_uncore_events, -}; - -static struct intel_uncore_type *snb_msr_uncores[] = { - &snb_uncore_cbox, - NULL, -}; - -enum { - SNB_PCI_UNCORE_IMC, -}; - -static struct uncore_event_desc snb_uncore_imc_events[] = { - INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"), - INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"), - INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"), - - INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"), - INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"), - INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"), - - { /* end: all zeroes */ }, -}; - -#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff -#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48 - -/* page size multiple covering all config regs */ -#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000 - -#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1 -#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
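An aside on the counter base offsets here: these IMC counters are free-running 32-bit MMIO registers inside the window that snb_uncore_imc_init_box() below maps from the BAR at SNB_UNCORE_PCI_IMC_BAR_OFFSET, and the 6.103515625e-5 scale exported for data_reads/data_writes above is 64/2^20, i.e. one 64-byte cache line per count, reported in MiB. A minimal sketch of a raw read, assuming the window is already mapped; the helper name is hypothetical and not part of the patch:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Hypothetical helper: convert a raw count from a free-running 32-bit
 * IMC counter into bytes.  io_addr is the ioremap()ed window; base is
 * e.g. SNB_UNCORE_PCI_IMC_DATA_READS_BASE (0x5050).
 */
static u64 snb_imc_counter_to_bytes(void __iomem *io_addr, unsigned long base)
{
	u64 count = readl(io_addr + base);

	return count * 64;	/* one count per 64-byte cache line */
}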
-#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2 -#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 -#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE - -static struct attribute *snb_uncore_imc_formats_attr[] = { - &format_attr_event.attr, - NULL, -}; - -static struct attribute_group snb_uncore_imc_format_group = { - .name = "format", - .attrs = snb_uncore_imc_formats_attr, -}; - -static void snb_uncore_imc_init_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET; - resource_size_t addr; - u32 pci_dword; - - pci_read_config_dword(pdev, where, &pci_dword); - addr = pci_dword; - -#ifdef CONFIG_PHYS_ADDR_T_64BIT - pci_read_config_dword(pdev, where + 4, &pci_dword); - addr |= ((resource_size_t)pci_dword << 32); -#endif - - addr &= ~(PAGE_SIZE - 1); - - box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE); - box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; -} - -static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) -{} - -static void snb_uncore_imc_disable_box(struct intel_uncore_box *box) -{} - -static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{} - -static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) -{} - -static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - return (u64)*(unsigned int *)(box->io_addr + hwc->event_base); -} - -/* - * custom event_init() function because we define our own fixed, free - * running counters, so we do not want to conflict with generic uncore - * logic. Also simplifies processing - */ -static int snb_uncore_imc_event_init(struct perf_event *event) -{ - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - struct hw_perf_event *hwc = &event->hw; - u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; - int idx, base; - - if (event->attr.type != event->pmu->type) - return -ENOENT; - - pmu = uncore_event_to_pmu(event); - /* no device found for this pmu */ - if (pmu->func_id < 0) - return -ENOENT; - - /* Sampling not supported yet */ - if (hwc->sample_period) - return -EINVAL; - - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ - return -EINVAL; - - /* - * Place all uncore events for a particular physical package - * onto a single cpu - */ - if (event->cpu < 0) - return -EINVAL; - - /* check only supported bits are set */ - if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) - return -EINVAL; - - box = uncore_pmu_to_box(pmu, event->cpu); - if (!box || box->cpu < 0) - return -EINVAL; - - event->cpu = box->cpu; - - event->hw.idx = -1; - event->hw.last_tag = ~0ULL; - event->hw.extra_reg.idx = EXTRA_REG_NONE; - event->hw.branch_reg.idx = EXTRA_REG_NONE; - /* - * check event is known (whitelist, determines counter) - */ - switch (cfg) { - case SNB_UNCORE_PCI_IMC_DATA_READS: - base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; - idx = UNCORE_PMC_IDX_FIXED; - break; - case SNB_UNCORE_PCI_IMC_DATA_WRITES: - base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; - idx = UNCORE_PMC_IDX_FIXED + 1; - break; - default: - return -EINVAL; - } - - /* must be done before validate_group */ - event->hw.event_base = base; - event->hw.config = cfg; - event->hw.idx = idx; - - /* no 
group validation needed, we have free running counters */ - - return 0; -} - -static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - return 0; -} - -static void snb_uncore_imc_event_start(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - u64 count; - - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) - return; - - event->hw.state = 0; - box->n_active++; - - list_add_tail(&event->active_entry, &box->active_list); - - count = snb_uncore_imc_read_counter(box, event); - local64_set(&event->hw.prev_count, count); - - if (box->n_active == 1) - uncore_pmu_start_hrtimer(box); -} - -static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (!(hwc->state & PERF_HES_STOPPED)) { - box->n_active--; - - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); - hwc->state |= PERF_HES_STOPPED; - - list_del(&event->active_entry); - - if (box->n_active == 0) - uncore_pmu_cancel_hrtimer(box); - } - - if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { - /* - * Drain the remaining delta count out of an event - * that we are disabling: - */ - uncore_perf_event_update(box, event); - hwc->state |= PERF_HES_UPTODATE; - } -} - -static int snb_uncore_imc_event_add(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (!box) - return -ENODEV; - - hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - if (!(flags & PERF_EF_START)) - hwc->state |= PERF_HES_ARCH; - - snb_uncore_imc_event_start(event, 0); - - box->n_events++; - - return 0; -} - -static void snb_uncore_imc_event_del(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - int i; - - snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); - - for (i = 0; i < box->n_events; i++) { - if (event == box->event_list[i]) { - --box->n_events; - break; - } - } -} - -static int snb_pci2phy_map_init(int devid) -{ - struct pci_dev *dev = NULL; - int bus; - - dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev); - if (!dev) - return -ENOTTY; - - bus = dev->bus->number; - - pcibus_to_physid[bus] = 0; - - pci_dev_put(dev); - - return 0; -} - -static struct pmu snb_uncore_imc_pmu = { - .task_ctx_nr = perf_invalid_context, - .event_init = snb_uncore_imc_event_init, - .add = snb_uncore_imc_event_add, - .del = snb_uncore_imc_event_del, - .start = snb_uncore_imc_event_start, - .stop = snb_uncore_imc_event_stop, - .read = uncore_pmu_event_read, -}; - -static struct intel_uncore_ops snb_uncore_imc_ops = { - .init_box = snb_uncore_imc_init_box, - .enable_box = snb_uncore_imc_enable_box, - .disable_box = snb_uncore_imc_disable_box, - .disable_event = snb_uncore_imc_disable_event, - .enable_event = snb_uncore_imc_enable_event, - .hw_config = snb_uncore_imc_hw_config, - .read_counter = snb_uncore_imc_read_counter, -}; - -static struct intel_uncore_type snb_uncore_imc = { - .name = "imc", - .num_counters = 2, - .num_boxes = 1, - .fixed_ctr_bits = 32, - .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE, - .event_descs = snb_uncore_imc_events, - .format_group = &snb_uncore_imc_format_group, - .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE, - .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK, - .ops = &snb_uncore_imc_ops, - .pmu = &snb_uncore_imc_pmu, -}; - -static struct intel_uncore_type *snb_pci_uncores[] = { -
[SNB_PCI_UNCORE_IMC] = &snb_uncore_imc, - NULL, -}; - -static const struct pci_device_id snb_uncore_pci_ids[] = { - { /* IMC */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC), - .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), - }, - { /* end: all zeroes */ }, -}; - -static const struct pci_device_id ivb_uncore_pci_ids[] = { - { /* IMC */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC), - .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), - }, - { /* end: all zeroes */ }, -}; - -static const struct pci_device_id hsw_uncore_pci_ids[] = { - { /* IMC */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), - .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), - }, - { /* end: all zeroes */ }, -}; - -static struct pci_driver snb_uncore_pci_driver = { - .name = "snb_uncore", - .id_table = snb_uncore_pci_ids, -}; - -static struct pci_driver ivb_uncore_pci_driver = { - .name = "ivb_uncore", - .id_table = ivb_uncore_pci_ids, -}; - -static struct pci_driver hsw_uncore_pci_driver = { - .name = "hsw_uncore", - .id_table = hsw_uncore_pci_ids, -}; - -/* end of Sandy Bridge uncore support */ - -/* Nehalem uncore support */ -static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) -{ - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); -} - -static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) -{ - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); -} - -static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); - else - wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); -} - -static struct attribute *nhm_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_cmask8.attr, - NULL, -}; - -static struct attribute_group nhm_uncore_format_group = { - .name = "format", - .attrs = nhm_uncore_formats_attr, -}; - -static struct uncore_event_desc nhm_uncore_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), - INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), - INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), - INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), - INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), - INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), - INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), - INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), - { /* end: all zeroes */ }, -}; - -static struct intel_uncore_ops nhm_uncore_msr_ops = { - .disable_box = nhm_uncore_msr_disable_box, - .enable_box = nhm_uncore_msr_enable_box, - .disable_event = snb_uncore_msr_disable_event, - .enable_event = nhm_uncore_msr_enable_event, - .read_counter = uncore_msr_read_counter, -}; - -static struct intel_uncore_type nhm_uncore = { - .name = "", - .num_counters = 8, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = NHM_UNC_PERFEVTSEL0, - .perf_ctr = NHM_UNC_UNCORE_PMC0, - .fixed_ctr = NHM_UNC_FIXED_CTR, - .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, - .event_mask = NHM_UNC_RAW_EVENT_MASK, - .event_descs = nhm_uncore_events, - .ops = 
&nhm_uncore_msr_ops, - .format_group = &nhm_uncore_format_group, -}; - -static struct intel_uncore_type *nhm_msr_uncores[] = { - &nhm_uncore, - NULL, -}; -/* end of Nehalem uncore support */ - -/* Nehalem-EX uncore support */ -DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); -DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); -DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); -DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); - -static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) -{ - wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); -} - -static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) -{ - unsigned msr = uncore_msr_box_ctl(box); - u64 config; - - if (msr) { - rdmsrl(msr, config); - config &= ~((1ULL << uncore_num_counters(box)) - 1); - /* WBox has a fixed counter */ - if (uncore_msr_fixed_ctl(box)) - config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; - wrmsrl(msr, config); - } -} - -static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) -{ - unsigned msr = uncore_msr_box_ctl(box); - u64 config; - - if (msr) { - rdmsrl(msr, config); - config |= (1ULL << uncore_num_counters(box)) - 1; - /* WBox has a fixed counter */ - if (uncore_msr_fixed_ctl(box)) - config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; - wrmsrl(msr, config); - } -} - -static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - wrmsrl(event->hw.config_base, 0); -} - -static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx >= UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); - else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); - else - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); -} - -#define NHMEX_UNCORE_OPS_COMMON_INIT() \ - .init_box = nhmex_uncore_msr_init_box, \ - .disable_box = nhmex_uncore_msr_disable_box, \ - .enable_box = nhmex_uncore_msr_enable_box, \ - .disable_event = nhmex_uncore_msr_disable_event, \ - .read_counter = uncore_msr_read_counter - -static struct intel_uncore_ops nhmex_uncore_ops = { - NHMEX_UNCORE_OPS_COMMON_INIT(), - .enable_event = nhmex_uncore_msr_enable_event, -}; - -static struct attribute *nhmex_uncore_ubox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_edge.attr, - NULL, -}; - -static struct attribute_group nhmex_uncore_ubox_format_group = { - .name = "format", - .attrs = nhmex_uncore_ubox_formats_attr, -}; - -static struct intel_uncore_type nhmex_uncore_ubox = { - .name = "ubox", - .num_counters = 1, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, - .perf_ctr = NHMEX_U_MSR_PMON_CTR, - .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, - .ops = &nhmex_uncore_ops, - .format_group = &nhmex_uncore_ubox_format_group -}; - -static struct attribute *nhmex_uncore_cbox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - NULL, -}; - -static struct attribute_group nhmex_uncore_cbox_format_group = { - .name = "format", - .attrs = nhmex_uncore_cbox_formats_attr, -}; - -/* msr offset for each instance of cbox */ -static unsigned nhmex_cbox_msr_offsets[] = { - 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, -}; - -static struct intel_uncore_type nhmex_uncore_cbox = { - .name 
= "cbox", - .num_counters = 6, - .num_boxes = 10, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, - .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, - .event_mask = NHMEX_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, - .msr_offsets = nhmex_cbox_msr_offsets, - .pair_ctr_ctl = 1, - .ops = &nhmex_uncore_ops, - .format_group = &nhmex_uncore_cbox_format_group -}; - -static struct uncore_event_desc nhmex_uncore_wbox_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), - { /* end: all zeroes */ }, -}; - -static struct intel_uncore_type nhmex_uncore_wbox = { - .name = "wbox", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_W_MSR_PMON_CNT0, - .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, - .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, - .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, - .event_mask = NHMEX_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, - .pair_ctr_ctl = 1, - .event_descs = nhmex_uncore_wbox_events, - .ops = &nhmex_uncore_ops, - .format_group = &nhmex_uncore_cbox_format_group -}; - -static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - int ctr, ev_sel; - - ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> - NHMEX_B_PMON_CTR_SHIFT; - ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> - NHMEX_B_PMON_CTL_EV_SEL_SHIFT; - - /* events that do not use the match/mask registers */ - if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || - (ctr == 2 && ev_sel != 0x4) || ctr == 3) - return 0; - - if (box->pmu->pmu_idx == 0) - reg1->reg = NHMEX_B0_MSR_MATCH; - else - reg1->reg = NHMEX_B1_MSR_MATCH; - reg1->idx = 0; - reg1->config = event->attr.config1; - reg2->config = event->attr.config2; - return 0; -} - -static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - - if (reg1->idx != EXTRA_REG_NONE) { - wrmsrl(reg1->reg, reg1->config); - wrmsrl(reg1->reg + 1, reg2->config); - } - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | - (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); -} - -/* - * The Bbox has 4 counters, but each counter monitors different events. - * Use bits 6-7 in the event config to select counter. 
- */ -static struct event_constraint nhmex_uncore_bbox_constraints[] = { - EVENT_CONSTRAINT(0 , 1, 0xc0), - EVENT_CONSTRAINT(0x40, 2, 0xc0), - EVENT_CONSTRAINT(0x80, 4, 0xc0), - EVENT_CONSTRAINT(0xc0, 8, 0xc0), - EVENT_CONSTRAINT_END, -}; - -static struct attribute *nhmex_uncore_bbox_formats_attr[] = { - &format_attr_event5.attr, - &format_attr_counter.attr, - &format_attr_match.attr, - &format_attr_mask.attr, - NULL, -}; - -static struct attribute_group nhmex_uncore_bbox_format_group = { - .name = "format", - .attrs = nhmex_uncore_bbox_formats_attr, -}; - -static struct intel_uncore_ops nhmex_uncore_bbox_ops = { - NHMEX_UNCORE_OPS_COMMON_INIT(), - .enable_event = nhmex_bbox_msr_enable_event, - .hw_config = nhmex_bbox_hw_config, - .get_constraint = uncore_get_constraint, - .put_constraint = uncore_put_constraint, -}; - -static struct intel_uncore_type nhmex_uncore_bbox = { - .name = "bbox", - .num_counters = 4, - .num_boxes = 2, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_B0_MSR_PMON_CTL0, - .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, - .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, - .msr_offset = NHMEX_B_MSR_OFFSET, - .pair_ctr_ctl = 1, - .num_shared_regs = 1, - .constraints = nhmex_uncore_bbox_constraints, - .ops = &nhmex_uncore_bbox_ops, - .format_group = &nhmex_uncore_bbox_format_group -}; - -static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - - /* only TO_R_PROG_EV event uses the match/mask register */ - if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != - NHMEX_S_EVENT_TO_R_PROG_EV) - return 0; - - if (box->pmu->pmu_idx == 0) - reg1->reg = NHMEX_S0_MSR_MM_CFG; - else - reg1->reg = NHMEX_S1_MSR_MM_CFG; - reg1->idx = 0; - reg1->config = event->attr.config1; - reg2->config = event->attr.config2; - return 0; -} - -static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - - if (reg1->idx != EXTRA_REG_NONE) { - wrmsrl(reg1->reg, 0); - wrmsrl(reg1->reg + 1, reg1->config); - wrmsrl(reg1->reg + 2, reg2->config); - wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); - } - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); -} - -static struct attribute *nhmex_uncore_sbox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - &format_attr_match.attr, - &format_attr_mask.attr, - NULL, -}; - -static struct attribute_group nhmex_uncore_sbox_format_group = { - .name = "format", - .attrs = nhmex_uncore_sbox_formats_attr, -}; - -static struct intel_uncore_ops nhmex_uncore_sbox_ops = { - NHMEX_UNCORE_OPS_COMMON_INIT(), - .enable_event = nhmex_sbox_msr_enable_event, - .hw_config = nhmex_sbox_hw_config, - .get_constraint = uncore_get_constraint, - .put_constraint = uncore_put_constraint, -}; - -static struct intel_uncore_type nhmex_uncore_sbox = { - .name = "sbox", - .num_counters = 4, - .num_boxes = 2, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_S0_MSR_PMON_CTL0, - .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, - .event_mask = NHMEX_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, - .msr_offset = NHMEX_S_MSR_OFFSET, - .pair_ctr_ctl = 1, - .num_shared_regs = 1, - .ops = 
&nhmex_uncore_sbox_ops, - .format_group = &nhmex_uncore_sbox_format_group -}; - -enum { - EXTRA_REG_NHMEX_M_FILTER, - EXTRA_REG_NHMEX_M_DSP, - EXTRA_REG_NHMEX_M_ISS, - EXTRA_REG_NHMEX_M_MAP, - EXTRA_REG_NHMEX_M_MSC_THR, - EXTRA_REG_NHMEX_M_PGT, - EXTRA_REG_NHMEX_M_PLD, - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, -}; - -static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { - MBOX_INC_SEL_EXTAR_REG(0x0, DSP), - MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), - MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), - MBOX_INC_SEL_EXTAR_REG(0x9, ISS), - /* event 0xa uses two extra registers */ - MBOX_INC_SEL_EXTAR_REG(0xa, ISS), - MBOX_INC_SEL_EXTAR_REG(0xa, PLD), - MBOX_INC_SEL_EXTAR_REG(0xb, PLD), - /* events 0xd ~ 0x10 use the same extra register */ - MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), - MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), - MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), - MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), - MBOX_INC_SEL_EXTAR_REG(0x16, PGT), - MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), - MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), - MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), - MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), - EVENT_EXTRA_END -}; - -/* Nehalem-EX or Westmere-EX ? */ -static bool uncore_nhmex; - -static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) -{ - struct intel_uncore_extra_reg *er; - unsigned long flags; - bool ret = false; - u64 mask; - - if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { - er = &box->shared_regs[idx]; - raw_spin_lock_irqsave(&er->lock, flags); - if (!atomic_read(&er->ref) || er->config == config) { - atomic_inc(&er->ref); - er->config = config; - ret = true; - } - raw_spin_unlock_irqrestore(&er->lock, flags); - - return ret; - } - /* - * The ZDP_CTL_FVC MSR has 4 fields which are used to control - * events 0xd ~ 0x10. Besides these 4 fields, there are additional - * fields which are shared. 
- */ - idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - if (WARN_ON_ONCE(idx >= 4)) - return false; - - /* mask of the shared fields */ - if (uncore_nhmex) - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; - else - mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; - er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; - - raw_spin_lock_irqsave(&er->lock, flags); - /* add mask of the non-shared field if it's in use */ - if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { - if (uncore_nhmex) - mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); - else - mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); - } - - if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { - atomic_add(1 << (idx * 8), &er->ref); - if (uncore_nhmex) - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | - NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); - else - mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | - WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); - er->config &= ~mask; - er->config |= (config & mask); - ret = true; - } - raw_spin_unlock_irqrestore(&er->lock, flags); - - return ret; -} - -static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) -{ - struct intel_uncore_extra_reg *er; - - if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { - er = &box->shared_regs[idx]; - atomic_dec(&er->ref); - return; - } - - idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; - atomic_sub(1 << (idx * 8), &er->ref); -} - -static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); - u64 config = reg1->config; - - /* get the non-shared control bits and shift them */ - idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - if (uncore_nhmex) - config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); - else - config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); - if (new_idx > orig_idx) { - idx = new_idx - orig_idx; - config <<= 3 * idx; - } else { - idx = orig_idx - new_idx; - config >>= 3 * idx; - } - - /* add the shared control bits back */ - if (uncore_nhmex) - config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; - else - config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; - config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; - if (modify) { - /* adjust the main event selector */ - if (new_idx > orig_idx) - hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; - else - hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; - reg1->config = config; - reg1->idx = ~0xff | new_idx; - } - return config; -} - -static struct event_constraint * -nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - int i, idx[2], alloc = 0; - u64 config1 = reg1->config; - - idx[0] = __BITS_VALUE(reg1->idx, 0, 8); - idx[1] = __BITS_VALUE(reg1->idx, 1, 8); -again: - for (i = 0; i < 2; i++) { - if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) - idx[i] = 0xff; - - if (idx[i] == 0xff) - continue; - - if (!nhmex_mbox_get_shared_reg(box, idx[i], - __BITS_VALUE(config1, i, 32))) - goto fail; - alloc |= (0x1 << i); - } - - /* for the match/mask registers */ - if (reg2->idx != EXTRA_REG_NONE && - (uncore_box_is_fake(box) || !reg2->alloc) && - !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) - goto fail; - - /* - * If it's a fake box -- as per validate_{group,event}() we - * shouldn't touch event state and we can avoid 
doing so - * since both will only call get_event_constraints() once - * on each event, this avoids the need for reg->alloc. - */ - if (!uncore_box_is_fake(box)) { - if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) - nhmex_mbox_alter_er(event, idx[0], true); - reg1->alloc |= alloc; - if (reg2->idx != EXTRA_REG_NONE) - reg2->alloc = 1; - } - return NULL; -fail: - if (idx[0] != 0xff && !(alloc & 0x1) && - idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { - /* - * events 0xd ~ 0x10 are functionally identical, but are - * controlled by different fields in the ZDP_CTL_FVC - * register. If we failed to take one field, try the - * other 3 choices. - */ - BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); - idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - idx[0] = (idx[0] + 1) % 4; - idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { - config1 = nhmex_mbox_alter_er(event, idx[0], false); - goto again; - } - } - - if (alloc & 0x1) - nhmex_mbox_put_shared_reg(box, idx[0]); - if (alloc & 0x2) - nhmex_mbox_put_shared_reg(box, idx[1]); - return &constraint_empty; -} - -static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - - if (uncore_box_is_fake(box)) - return; - - if (reg1->alloc & 0x1) - nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); - if (reg1->alloc & 0x2) - nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); - reg1->alloc = 0; - - if (reg2->alloc) { - nhmex_mbox_put_shared_reg(box, reg2->idx); - reg2->alloc = 0; - } -} - -static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) -{ - if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) - return er->idx; - return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; -} - -static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct intel_uncore_type *type = box->pmu->type; - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - struct extra_reg *er; - unsigned msr; - int reg_idx = 0; - /* - * The mbox events may require at most 2 extra MSRs. But only - * the lower 32 bits in these MSRs are significant, so we can use - * config1 to pass two MSRs' config. - */ - for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { - if (er->event != (event->hw.config & er->config_mask)) - continue; - if (event->attr.config1 & ~er->valid_mask) - return -EINVAL; - - msr = er->msr + type->msr_offset * box->pmu->pmu_idx; - if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) - return -EINVAL; - - /* always use the 32~63 bits to pass the PLD config */ - if (er->idx == EXTRA_REG_NHMEX_M_PLD) - reg_idx = 1; - else if (WARN_ON_ONCE(reg_idx > 0)) - return -EINVAL; - - reg1->idx &= ~(0xff << (reg_idx * 8)); - reg1->reg &= ~(0xffff << (reg_idx * 16)); - reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); - reg1->reg |= msr << (reg_idx * 16); - reg1->config = event->attr.config1; - reg_idx++; - } - /* - * The mbox only provides the ability to perform address matching - * for the PLD events.
- */ - if (reg_idx == 2) { - reg2->idx = EXTRA_REG_NHMEX_M_FILTER; - if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) - reg2->config = event->attr.config2; - else - reg2->config = ~0ULL; - if (box->pmu->pmu_idx == 0) - reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; - else - reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; - } - return 0; -} - -static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) -{ - struct intel_uncore_extra_reg *er; - unsigned long flags; - u64 config; - - if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) - return box->shared_regs[idx].config; - - er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; - raw_spin_lock_irqsave(&er->lock, flags); - config = er->config; - raw_spin_unlock_irqrestore(&er->lock, flags); - return config; -} - -static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - int idx; - - idx = __BITS_VALUE(reg1->idx, 0, 8); - if (idx != 0xff) - wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), - nhmex_mbox_shared_reg_config(box, idx)); - idx = __BITS_VALUE(reg1->idx, 1, 8); - if (idx != 0xff) - wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), - nhmex_mbox_shared_reg_config(box, idx)); - - if (reg2->idx != EXTRA_REG_NONE) { - wrmsrl(reg2->reg, 0); - if (reg2->config != ~0ULL) { - wrmsrl(reg2->reg + 1, - reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); - wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & - (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); - wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); - } - } - - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); -} - -DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); -DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); -DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); -DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); -DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); -DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); -DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); -DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); -DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); -DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); - -static struct attribute *nhmex_uncore_mbox_formats_attr[] = { - &format_attr_count_mode.attr, - &format_attr_storage_mode.attr, - &format_attr_wrap_mode.attr, - &format_attr_flag_mode.attr, - &format_attr_inc_sel.attr, - &format_attr_set_flag_sel.attr, - &format_attr_filter_cfg_en.attr, - &format_attr_filter_match.attr, - &format_attr_filter_mask.attr, - &format_attr_dsp.attr, - &format_attr_thr.attr, - &format_attr_fvc.attr, - &format_attr_pgt.attr, - &format_attr_map.attr, - &format_attr_iss.attr, - &format_attr_pld.attr, - NULL, -}; - -static struct attribute_group nhmex_uncore_mbox_format_group = { - .name = "format", - .attrs = nhmex_uncore_mbox_formats_attr, -}; - -static struct uncore_event_desc nhmex_uncore_mbox_events[] = { - INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), - INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, 
"inc_sel=0xd,fvc=0x2820"), - { /* end: all zeroes */ }, -}; - -static struct uncore_event_desc wsmex_uncore_mbox_events[] = { - INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), - INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), - { /* end: all zeroes */ }, -}; - -static struct intel_uncore_ops nhmex_uncore_mbox_ops = { - NHMEX_UNCORE_OPS_COMMON_INIT(), - .enable_event = nhmex_mbox_msr_enable_event, - .hw_config = nhmex_mbox_hw_config, - .get_constraint = nhmex_mbox_get_constraint, - .put_constraint = nhmex_mbox_put_constraint, -}; - -static struct intel_uncore_type nhmex_uncore_mbox = { - .name = "mbox", - .num_counters = 6, - .num_boxes = 2, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_M0_MSR_PMU_CTL0, - .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, - .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, - .msr_offset = NHMEX_M_MSR_OFFSET, - .pair_ctr_ctl = 1, - .num_shared_regs = 8, - .event_descs = nhmex_uncore_mbox_events, - .ops = &nhmex_uncore_mbox_ops, - .format_group = &nhmex_uncore_mbox_format_group, -}; - -static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - - /* adjust the main event selector and extra register index */ - if (reg1->idx % 2) { - reg1->idx--; - hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; - } else { - reg1->idx++; - hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; - } - - /* adjust extra register config */ - switch (reg1->idx % 6) { - case 2: - /* shift the 8~15 bits to the 0~7 bits */ - reg1->config >>= 8; - break; - case 3: - /* shift the 0~7 bits to the 8~15 bits */ - reg1->config <<= 8; - break; - }; -} - -/* - * Each rbox has 4 event set which monitor PQI port 0~3 or 4~7. - * An event set consists of 6 events, the 3rd and 4th events in - * an event set use the same extra register. So an event set uses - * 5 extra registers. - */ -static struct event_constraint * -nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - struct intel_uncore_extra_reg *er; - unsigned long flags; - int idx, er_idx; - u64 config1; - bool ok = false; - - if (!uncore_box_is_fake(box) && reg1->alloc) - return NULL; - - idx = reg1->idx % 6; - config1 = reg1->config; -again: - er_idx = idx; - /* the 3rd and 4th events use the same extra register */ - if (er_idx > 2) - er_idx--; - er_idx += (reg1->idx / 6) * 5; - - er = &box->shared_regs[er_idx]; - raw_spin_lock_irqsave(&er->lock, flags); - if (idx < 2) { - if (!atomic_read(&er->ref) || er->config == reg1->config) { - atomic_inc(&er->ref); - er->config = reg1->config; - ok = true; - } - } else if (idx == 2 || idx == 3) { - /* - * these two events use different fields in a extra register, - * the 0~7 bits and the 8~15 bits respectively. 
- */ - u64 mask = 0xff << ((idx - 2) * 8); - if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || - !((er->config ^ config1) & mask)) { - atomic_add(1 << ((idx - 2) * 8), &er->ref); - er->config &= ~mask; - er->config |= config1 & mask; - ok = true; - } - } else { - if (!atomic_read(&er->ref) || - (er->config == (hwc->config >> 32) && - er->config1 == reg1->config && - er->config2 == reg2->config)) { - atomic_inc(&er->ref); - er->config = (hwc->config >> 32); - er->config1 = reg1->config; - er->config2 = reg2->config; - ok = true; - } - } - raw_spin_unlock_irqrestore(&er->lock, flags); - - if (!ok) { - /* - * The Rbox events are always in pairs. The paired - * events are functionally identical, but use different - * extra registers. If we failed to take an extra - * register, try the alternative. - */ - idx ^= 1; - if (idx != reg1->idx % 6) { - if (idx == 2) - config1 >>= 8; - else if (idx == 3) - config1 <<= 8; - goto again; - } - } else { - if (!uncore_box_is_fake(box)) { - if (idx != reg1->idx % 6) - nhmex_rbox_alter_er(box, event); - reg1->alloc = 1; - } - return NULL; - } - return &constraint_empty; -} - -static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) -{ - struct intel_uncore_extra_reg *er; - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - int idx, er_idx; - - if (uncore_box_is_fake(box) || !reg1->alloc) - return; - - idx = reg1->idx % 6; - er_idx = idx; - if (er_idx > 2) - er_idx--; - er_idx += (reg1->idx / 6) * 5; - - er = &box->shared_regs[er_idx]; - if (idx == 2 || idx == 3) - atomic_sub(1 << ((idx - 2) * 8), &er->ref); - else - atomic_dec(&er->ref); - - reg1->alloc = 0; -} - -static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - int idx; - - idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> - NHMEX_R_PMON_CTL_EV_SEL_SHIFT; - if (idx >= 0x18) - return -EINVAL; - - reg1->idx = idx; - reg1->config = event->attr.config1; - - switch (idx % 6) { - case 4: - case 5: - hwc->config |= event->attr.config & (~0ULL << 32); - reg2->config = event->attr.config2; - break; - }; - return 0; -} - -static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - int idx, port; - - idx = reg1->idx; - port = idx / 6 + box->pmu->pmu_idx * 4; - - switch (idx % 6) { - case 0: - wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); - break; - case 1: - wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); - break; - case 2: - case 3: - wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), - uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); - break; - case 4: - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), - hwc->config >> 32); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); - break; - case 5: - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), - hwc->config >> 32); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); - break; - }; - - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | - (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); -} - -DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
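The Rbox index arithmetic above is dense: reg1->idx % 6 selects the event within a set, events 2 and 3 collapse onto the one shared QLX_CFG register (hence the er_idx decrement and the stride of 5 per set), and port = idx / 6 + pmu_idx * 4 picks the QPI port. A standalone sketch, assuming the 6-events/5-registers layout the comments above describe; the function is illustrative only, not part of the patch:

#include <stdio.h>

/* mirror of the er_idx computation in nhmex_rbox_get_constraint() */
static int rbox_extra_reg_index(int idx)
{
	int er_idx = idx % 6;

	if (er_idx > 2)			/* events 2 and 3 share a register */
		er_idx--;
	return er_idx + (idx / 6) * 5;	/* 5 extra registers per event set */
}

int main(void)
{
	int idx;

	for (idx = 0; idx < 12; idx++)	/* two event sets */
		printf("event idx %2d -> extra reg %d\n",
		       idx, rbox_extra_reg_index(idx));
	return 0;
}

Running it shows events 2 and 3 of set 0 both landing on extra register 2, and events 8 and 9 of set 1 both landing on register 7, matching the "3rd and 4th events share" rule in the comment.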
-DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); -DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); -DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); -DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); - -static struct attribute *nhmex_uncore_rbox_formats_attr[] = { - &format_attr_event5.attr, - &format_attr_xbr_mm_cfg.attr, - &format_attr_xbr_match.attr, - &format_attr_xbr_mask.attr, - &format_attr_qlx_cfg.attr, - &format_attr_iperf_cfg.attr, - NULL, -}; - -static struct attribute_group nhmex_uncore_rbox_format_group = { - .name = "format", - .attrs = nhmex_uncore_rbox_formats_attr, -}; - -static struct uncore_event_desc nhmex_uncore_rbox_events[] = { - INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), - INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"), - INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), - INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), - INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"), - INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"), - { /* end: all zeroes */ }, -}; - -static struct intel_uncore_ops nhmex_uncore_rbox_ops = { - NHMEX_UNCORE_OPS_COMMON_INIT(), - .enable_event = nhmex_rbox_msr_enable_event, - .hw_config = nhmex_rbox_hw_config, - .get_constraint = nhmex_rbox_get_constraint, - .put_constraint = nhmex_rbox_put_constraint, -}; - -static struct intel_uncore_type nhmex_uncore_rbox = { - .name = "rbox", - .num_counters = 8, - .num_boxes = 2, - .perf_ctr_bits = 48, - .event_ctl = NHMEX_R_MSR_PMON_CTL0, - .perf_ctr = NHMEX_R_MSR_PMON_CNT0, - .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, - .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, - .msr_offset = NHMEX_R_MSR_OFFSET, - .pair_ctr_ctl = 1, - .num_shared_regs = 20, - .event_descs = nhmex_uncore_rbox_events, - .ops = &nhmex_uncore_rbox_ops, - .format_group = &nhmex_uncore_rbox_format_group -}; - -static struct intel_uncore_type *nhmex_msr_uncores[] = { - &nhmex_uncore_ubox, - &nhmex_uncore_cbox, - &nhmex_uncore_bbox, - &nhmex_uncore_sbox, - &nhmex_uncore_mbox, - &nhmex_uncore_rbox, - &nhmex_uncore_wbox, - NULL, -}; -/* end of Nehalem-EX uncore support */ - static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) { struct hw_perf_event *hwc = &event->hw; @@ -3140,7 +166,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_eve hwc->event_base = uncore_perf_ctr(box, hwc->idx); } -static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) +void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) { u64 prev_count, new_count, delta; int shift; @@ -3201,14 +227,14 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) return HRTIMER_RESTART; } -static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) +void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) { __hrtimer_start_range_ns(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), 0, HRTIMER_MODE_REL_PINNED, 0); } -static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) +void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) { hrtimer_cancel(&box->hrtimer); } @@ -3291,7 +317,7 @@ uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *eve } if (event->attr.config == UNCORE_FIXED_EVENT) - return &constraint_fixed; + return &uncore_constraint_fixed; if (type->constraints) { 
for_each_event_constraint(c, type->constraints) { @@ -3496,7 +522,7 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags) event->hw.last_tag = ~0ULL; } -static void uncore_pmu_event_read(struct perf_event *event) +void uncore_pmu_event_read(struct perf_event *event) { struct intel_uncore_box *box = uncore_event_to_box(event); uncore_perf_event_update(box, event); @@ -3758,9 +784,6 @@ fail: return ret; } -static struct pci_driver *uncore_pci_driver; -static bool pcidrv_registered; - /* * add a pci uncore device */ @@ -3771,17 +794,18 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id struct intel_uncore_type *type; int phys_id; - phys_id = pcibus_to_physid[pdev->bus->number]; + phys_id = uncore_pcibus_to_physid[pdev->bus->number]; if (phys_id < 0) return -ENODEV; if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { - extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev; + int idx = UNCORE_PCI_DEV_IDX(id->driver_data); + uncore_extra_pci_dev[phys_id][idx] = pdev; pci_set_drvdata(pdev, NULL); return 0; } - type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; box = uncore_alloc_box(type, NUMA_NO_NODE); if (!box) return -ENOMEM; @@ -3813,13 +837,13 @@ static void uncore_pci_remove(struct pci_dev *pdev) { struct intel_uncore_box *box = pci_get_drvdata(pdev); struct intel_uncore_pmu *pmu; - int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number]; + int i, cpu, phys_id = uncore_pcibus_to_physid[pdev->bus->number]; box = pci_get_drvdata(pdev); if (!box) { for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { - if (extra_pci_dev[phys_id][i] == pdev) { - extra_pci_dev[phys_id][i] = NULL; + if (uncore_extra_pci_dev[phys_id][i] == pdev) { + uncore_extra_pci_dev[phys_id][i] = NULL; break; } } @@ -3854,46 +878,29 @@ static int __init uncore_pci_init(void) switch (boot_cpu_data.x86_model) { case 45: /* Sandy Bridge-EP */ - ret = snbep_pci2phy_map_init(0x3ce0); - if (ret) - return ret; - pci_uncores = snbep_pci_uncores; - uncore_pci_driver = &snbep_uncore_pci_driver; + ret = snbep_uncore_pci_init(); break; - case 62: /* IvyTown */ - ret = snbep_pci2phy_map_init(0x0e1e); - if (ret) - return ret; - pci_uncores = ivt_pci_uncores; - uncore_pci_driver = &ivt_uncore_pci_driver; + case 62: /* Ivy Bridge-EP */ + ret = ivbep_uncore_pci_init(); break; case 42: /* Sandy Bridge */ - ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC); - if (ret) - return ret; - pci_uncores = snb_pci_uncores; - uncore_pci_driver = &snb_uncore_pci_driver; + ret = snb_uncore_pci_init(); break; case 58: /* Ivy Bridge */ - ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC); - if (ret) - return ret; - pci_uncores = snb_pci_uncores; - uncore_pci_driver = &ivb_uncore_pci_driver; + ret = ivb_uncore_pci_init(); break; case 60: /* Haswell */ case 69: /* Haswell Celeron */ - ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC); - if (ret) - return ret; - pci_uncores = snb_pci_uncores; - uncore_pci_driver = &hsw_uncore_pci_driver; + ret = hsw_uncore_pci_init(); break; default: return 0; } - ret = uncore_types_init(pci_uncores); + if (ret) + return ret; + + ret = uncore_types_init(uncore_pci_uncores); if (ret) return ret; @@ -3904,7 +911,7 @@ static int __init uncore_pci_init(void) if (ret == 0) pcidrv_registered = true; else - uncore_types_exit(pci_uncores); + uncore_types_exit(uncore_pci_uncores); return ret; } @@ -3914,7 +921,7 @@ static void __init uncore_pci_exit(void) if 
(pcidrv_registered) { pcidrv_registered = false; pci_unregister_driver(uncore_pci_driver); - uncore_types_exit(pci_uncores); + uncore_types_exit(uncore_pci_uncores); } } @@ -3940,8 +947,8 @@ static void uncore_cpu_dying(int cpu) struct intel_uncore_box *box; int i, j; - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; + for (i = 0; uncore_msr_uncores[i]; i++) { + type = uncore_msr_uncores[i]; for (j = 0; j < type->num_boxes; j++) { pmu = &type->pmus[j]; box = *per_cpu_ptr(pmu->box, cpu); @@ -3961,8 +968,8 @@ static int uncore_cpu_starting(int cpu) phys_id = topology_physical_package_id(cpu); - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; + for (i = 0; uncore_msr_uncores[i]; i++) { + type = uncore_msr_uncores[i]; for (j = 0; j < type->num_boxes; j++) { pmu = &type->pmus[j]; box = *per_cpu_ptr(pmu->box, cpu); @@ -4002,8 +1009,8 @@ static int uncore_cpu_prepare(int cpu, int phys_id) struct intel_uncore_box *box; int i, j; - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; + for (i = 0; uncore_msr_uncores[i]; i++) { + type = uncore_msr_uncores[i]; for (j = 0; j < type->num_boxes; j++) { pmu = &type->pmus[j]; if (pmu->func_id < 0) @@ -4083,8 +1090,8 @@ static void uncore_event_exit_cpu(int cpu) if (target >= 0) cpumask_set_cpu(target, &uncore_cpu_mask); - uncore_change_context(msr_uncores, cpu, target); - uncore_change_context(pci_uncores, cpu, target); + uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_pci_uncores, cpu, target); } static void uncore_event_init_cpu(int cpu) @@ -4099,8 +1106,8 @@ static void uncore_event_init_cpu(int cpu) cpumask_set_cpu(cpu, &uncore_cpu_mask); - uncore_change_context(msr_uncores, -1, cpu); - uncore_change_context(pci_uncores, -1, cpu); + uncore_change_context(uncore_msr_uncores, -1, cpu); + uncore_change_context(uncore_pci_uncores, -1, cpu); } static int uncore_cpu_notifier(struct notifier_block *self, @@ -4160,47 +1167,35 @@ static void __init uncore_cpu_setup(void *dummy) static int __init uncore_cpu_init(void) { - int ret, max_cores; + int ret; - max_cores = boot_cpu_data.x86_max_cores; switch (boot_cpu_data.x86_model) { case 26: /* Nehalem */ case 30: case 37: /* Westmere */ case 44: - msr_uncores = nhm_msr_uncores; + nhm_uncore_cpu_init(); break; case 42: /* Sandy Bridge */ case 58: /* Ivy Bridge */ - if (snb_uncore_cbox.num_boxes > max_cores) - snb_uncore_cbox.num_boxes = max_cores; - msr_uncores = snb_msr_uncores; + snb_uncore_cpu_init(); break; case 45: /* Sandy Bridge-EP */ - if (snbep_uncore_cbox.num_boxes > max_cores) - snbep_uncore_cbox.num_boxes = max_cores; - msr_uncores = snbep_msr_uncores; + snbep_uncore_cpu_init(); break; case 46: /* Nehalem-EX */ - uncore_nhmex = true; case 47: /* Westmere-EX aka. 
Xeon E7 */ - if (!uncore_nhmex) - nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; - if (nhmex_uncore_cbox.num_boxes > max_cores) - nhmex_uncore_cbox.num_boxes = max_cores; - msr_uncores = nhmex_msr_uncores; + nhmex_uncore_cpu_init(); break; - case 62: /* IvyTown */ - if (ivt_uncore_cbox.num_boxes > max_cores) - ivt_uncore_cbox.num_boxes = max_cores; - msr_uncores = ivt_msr_uncores; + case 62: /* Ivy Bridge-EP */ + ivbep_uncore_cpu_init(); break; default: return 0; } - ret = uncore_types_init(msr_uncores); + ret = uncore_types_init(uncore_msr_uncores); if (ret) return ret; @@ -4213,16 +1208,16 @@ static int __init uncore_pmus_register(void) struct intel_uncore_type *type; int i, j; - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; + for (i = 0; uncore_msr_uncores[i]; i++) { + type = uncore_msr_uncores[i]; for (j = 0; j < type->num_boxes; j++) { pmu = &type->pmus[j]; uncore_pmu_register(pmu); } } - for (i = 0; pci_uncores[i]; i++) { - type = pci_uncores[i]; + for (i = 0; uncore_pci_uncores[i]; i++) { + type = uncore_pci_uncores[i]; for (j = 0; j < type->num_boxes; j++) { pmu = &type->pmus[j]; uncore_pmu_register(pmu); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 90236f0c94a9..1d7e89416018 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -24,395 +24,6 @@ #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) -/* SNB event control */ -#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff -#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 -#define SNB_UNC_CTL_EDGE_DET (1 << 18) -#define SNB_UNC_CTL_EN (1 << 22) -#define SNB_UNC_CTL_INVERT (1 << 23) -#define SNB_UNC_CTL_CMASK_MASK 0x1f000000 -#define NHM_UNC_CTL_CMASK_MASK 0xff000000 -#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) - -#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ - SNB_UNC_CTL_UMASK_MASK | \ - SNB_UNC_CTL_EDGE_DET | \ - SNB_UNC_CTL_INVERT | \ - SNB_UNC_CTL_CMASK_MASK) - -#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ - SNB_UNC_CTL_UMASK_MASK | \ - SNB_UNC_CTL_EDGE_DET | \ - SNB_UNC_CTL_INVERT | \ - NHM_UNC_CTL_CMASK_MASK) - -/* SNB global control register */ -#define SNB_UNC_PERF_GLOBAL_CTL 0x391 -#define SNB_UNC_FIXED_CTR_CTRL 0x394 -#define SNB_UNC_FIXED_CTR 0x395 - -/* SNB uncore global control */ -#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) -#define SNB_UNC_GLOBAL_CTL_EN (1 << 29) - -/* SNB Cbo register */ -#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 -#define SNB_UNC_CBO_0_PER_CTR0 0x706 -#define SNB_UNC_CBO_MSR_OFFSET 0x10 - -/* NHM global control register */ -#define NHM_UNC_PERF_GLOBAL_CTL 0x391 -#define NHM_UNC_FIXED_CTR 0x394 -#define NHM_UNC_FIXED_CTR_CTRL 0x395 - -/* NHM uncore global control */ -#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) -#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) - -/* NHM uncore register */ -#define NHM_UNC_PERFEVTSEL0 0x3c0 -#define NHM_UNC_UNCORE_PMC0 0x3b0 - -/* SNB-EP Box level control */ -#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0) -#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1) -#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8) -#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16) -#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ - SNBEP_PMON_BOX_CTL_RST_CTRS | \ - SNBEP_PMON_BOX_CTL_FRZ_EN) -/* SNB-EP event control */ -#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff -#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 -#define SNBEP_PMON_CTL_RST (1 << 17) -#define SNBEP_PMON_CTL_EDGE_DET (1 << 18) -#define 
SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) -#define SNBEP_PMON_CTL_EN (1 << 22) -#define SNBEP_PMON_CTL_INVERT (1 << 23) -#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 -#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_UMASK_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PMON_CTL_INVERT | \ - SNBEP_PMON_CTL_TRESH_MASK) - -/* SNB-EP Ubox event control */ -#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000 -#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_UMASK_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PMON_CTL_INVERT | \ - SNBEP_U_MSR_PMON_CTL_TRESH_MASK) - -#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19) -#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ - SNBEP_CBO_PMON_CTL_TID_EN) - -/* SNB-EP PCU event control */ -#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000 -#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000 -#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30) -#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31) -#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PMON_CTL_EV_SEL_EXT | \ - SNBEP_PMON_CTL_INVERT | \ - SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) - -#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_RAW_EVENT_MASK | \ - SNBEP_PMON_CTL_EV_SEL_EXT) - -/* SNB-EP pci control register */ -#define SNBEP_PCI_PMON_BOX_CTL 0xf4 -#define SNBEP_PCI_PMON_CTL0 0xd8 -/* SNB-EP pci counter register */ -#define SNBEP_PCI_PMON_CTR0 0xa0 - -/* SNB-EP home agent register */ -#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40 -#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44 -#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48 -/* SNB-EP memory controller register */ -#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0 -#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0 -/* SNB-EP QPI register */ -#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228 -#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c -#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238 -#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c - -/* SNB-EP Ubox register */ -#define SNBEP_U_MSR_PMON_CTR0 0xc16 -#define SNBEP_U_MSR_PMON_CTL0 0xc10 - -#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08 -#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09 - -/* SNB-EP Cbo register */ -#define SNBEP_C0_MSR_PMON_CTR0 0xd16 -#define SNBEP_C0_MSR_PMON_CTL0 0xd10 -#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 -#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 -#define SNBEP_CBO_MSR_OFFSET 0x20 - -#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f -#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00 -#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000 -#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000 - -#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \ - .event = (e), \ - .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \ - .config_mask = (m), \ - .idx = (i) \ -} - -/* SNB-EP PCU register */ -#define SNBEP_PCU_MSR_PMON_CTR0 0xc36 -#define SNBEP_PCU_MSR_PMON_CTL0 0xc30 -#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24 -#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34 -#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff -#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc -#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd - -/* IVT event control */ -#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ - SNBEP_PMON_BOX_CTL_RST_CTRS) -#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_UMASK_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - 
SNBEP_PMON_CTL_TRESH_MASK) -/* IVT Ubox */ -#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00 -#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31) -#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29) - -#define IVT_U_MSR_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_UMASK_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_U_MSR_PMON_CTL_TRESH_MASK) -/* IVT Cbo */ -#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \ - SNBEP_CBO_PMON_CTL_TID_EN) - -#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0) -#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5) -#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17) -#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) -#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) -#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) -#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) -#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63) - -/* IVT home agent */ -#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16) -#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \ - (IVT_PMON_RAW_EVENT_MASK | \ - IVT_HA_PCI_PMON_CTL_Q_OCC_RST) -/* IVT PCU */ -#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_EV_SEL_EXT | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) -/* IVT QPI */ -#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \ - (IVT_PMON_RAW_EVENT_MASK | \ - SNBEP_PMON_CTL_EV_SEL_EXT) - -/* NHM-EX event control */ -#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff -#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 -#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) -#define NHMEX_PMON_CTL_EDGE_DET (1 << 18) -#define NHMEX_PMON_CTL_PMI_EN (1 << 20) -#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) -#define NHMEX_PMON_CTL_INVERT (1 << 23) -#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 -#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ - NHMEX_PMON_CTL_UMASK_MASK | \ - NHMEX_PMON_CTL_EDGE_DET | \ - NHMEX_PMON_CTL_INVERT | \ - NHMEX_PMON_CTL_TRESH_MASK) - -/* NHM-EX Ubox */ -#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 -#define NHMEX_U_MSR_PMON_CTR 0xc11 -#define NHMEX_U_MSR_PMON_EV_SEL 0xc10 - -#define NHMEX_U_PMON_GLOBAL_EN (1 << 0) -#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e -#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) -#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) -#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) - -#define NHMEX_U_PMON_RAW_EVENT_MASK \ - (NHMEX_PMON_CTL_EV_SEL_MASK | \ - NHMEX_PMON_CTL_EDGE_DET) - -/* NHM-EX Cbox */ -#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 -#define NHMEX_C0_MSR_PMON_CTR0 0xd11 -#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 -#define NHMEX_C_MSR_OFFSET 0x20 - -/* NHM-EX Bbox */ -#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 -#define NHMEX_B0_MSR_PMON_CTR0 0xc31 -#define NHMEX_B0_MSR_PMON_CTL0 0xc30 -#define NHMEX_B_MSR_OFFSET 0x40 -#define NHMEX_B0_MSR_MATCH 0xe45 -#define NHMEX_B0_MSR_MASK 0xe46 -#define NHMEX_B1_MSR_MATCH 0xe4d -#define NHMEX_B1_MSR_MASK 0xe4e - -#define NHMEX_B_PMON_CTL_EN (1 << 0) -#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 -#define NHMEX_B_PMON_CTL_EV_SEL_MASK \ - (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) -#define NHMEX_B_PMON_CTR_SHIFT 6 -#define NHMEX_B_PMON_CTR_MASK \ - (0x3 << NHMEX_B_PMON_CTR_SHIFT) -#define NHMEX_B_PMON_RAW_EVENT_MASK \ - (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ - NHMEX_B_PMON_CTR_MASK) - -/* NHM-EX Sbox */ -#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 -#define NHMEX_S0_MSR_PMON_CTR0 0xc51 -#define 
NHMEX_S0_MSR_PMON_CTL0 0xc50 -#define NHMEX_S_MSR_OFFSET 0x80 -#define NHMEX_S0_MSR_MM_CFG 0xe48 -#define NHMEX_S0_MSR_MATCH 0xe49 -#define NHMEX_S0_MSR_MASK 0xe4a -#define NHMEX_S1_MSR_MM_CFG 0xe58 -#define NHMEX_S1_MSR_MATCH 0xe59 -#define NHMEX_S1_MSR_MASK 0xe5a - -#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) -#define NHMEX_S_EVENT_TO_R_PROG_EV 0 - -/* NHM-EX Mbox */ -#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 -#define NHMEX_M0_MSR_PMU_DSP 0xca5 -#define NHMEX_M0_MSR_PMU_ISS 0xca6 -#define NHMEX_M0_MSR_PMU_MAP 0xca7 -#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 -#define NHMEX_M0_MSR_PMU_PGT 0xca9 -#define NHMEX_M0_MSR_PMU_PLD 0xcaa -#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab -#define NHMEX_M0_MSR_PMU_CTL0 0xcb0 -#define NHMEX_M0_MSR_PMU_CNT0 0xcb1 -#define NHMEX_M_MSR_OFFSET 0x40 -#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 -#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c - -#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) -#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL -#define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL -#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 - -#define NHMEX_M_PMON_CTL_EN (1 << 0) -#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) -#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 -#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ - (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) -#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 -#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ - (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) -#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) -#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) -#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 -#define NHMEX_M_PMON_CTL_INC_SEL_MASK \ - (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) -#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 -#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ - (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) -#define NHMEX_M_PMON_RAW_EVENT_MASK \ - (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ - NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ - NHMEX_M_PMON_CTL_WRAP_MODE | \ - NHMEX_M_PMON_CTL_FLAG_MODE | \ - NHMEX_M_PMON_CTL_INC_SEL_MASK | \ - NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) - -#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) -#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) - -#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) -#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) - -/* - * use the 9~13 bits to select event If the 7th bit is not set, - * otherwise use the 19~21 bits to select event. - */ -#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) -#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ - NHMEX_M_PMON_CTL_FLAG_MODE) -#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ - NHMEX_M_PMON_CTL_FLAG_MODE) -#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ - NHMEX_M_PMON_CTL_FLAG_MODE) -#define MBOX_INC_SEL_EXTAR_REG(c, r) \ - EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ - MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) -#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ - EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ - MBOX_SET_FLAG_SEL_MASK, \ - (u64)-1, NHMEX_M_##r) - -/* NHM-EX Rbox */ -#define NHMEX_R_MSR_GLOBAL_CTL 0xe00 -#define NHMEX_R_MSR_PMON_CTL0 0xe10 -#define NHMEX_R_MSR_PMON_CNT0 0xe11 -#define NHMEX_R_MSR_OFFSET 0x20 - -#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ - ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) -#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) -#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) -#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ - (((n) < 4 ? 
0 : 0x10) + (n) * 4) -#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ - (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) -#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ - (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) -#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ - (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) -#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ - (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) -#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ - (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) -#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ - (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) - -#define NHMEX_R_PMON_CTL_EN (1 << 0) -#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 -#define NHMEX_R_PMON_CTL_EV_SEL_MASK \ - (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) -#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) -#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK - -/* NHM-EX Wbox */ -#define NHMEX_W_MSR_GLOBAL_CTL 0xc80 -#define NHMEX_W_MSR_PMON_CNT0 0xc90 -#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 -#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 -#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 - -#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) - struct intel_uncore_ops; struct intel_uncore_pmu; struct intel_uncore_box; @@ -505,6 +116,9 @@ struct uncore_event_desc { const char *config; }; +ssize_t uncore_event_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf); + #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ { \ .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \ @@ -522,15 +136,6 @@ static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ static struct kobj_attribute format_attr_##_var = \ __ATTR(_name, 0444, __uncore_##_var##_show, NULL) - -static ssize_t uncore_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - struct uncore_event_desc *event = - container_of(attr, struct uncore_event_desc, attr); - return sprintf(buf, "%s", event->config); -} - static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) { return box->pmu->type->box_ctl; @@ -694,3 +299,39 @@ static inline bool uncore_box_is_fake(struct intel_uncore_box *box) { return (box->phys_id < 0); } + +struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event); +struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu); +struct intel_uncore_box *uncore_event_to_box(struct perf_event *event); +u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); +void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); +void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); +void uncore_pmu_event_read(struct perf_event *event); +void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); +struct event_constraint * +uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event); +void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event); +u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx); + +extern struct intel_uncore_type **uncore_msr_uncores; +extern struct intel_uncore_type **uncore_pci_uncores; +extern struct pci_driver *uncore_pci_driver; +extern int uncore_pcibus_to_physid[256]; +extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; +extern struct event_constraint uncore_constraint_empty; + +/* perf_event_intel_uncore_snb.c */ +int snb_uncore_pci_init(void); +int ivb_uncore_pci_init(void); +int hsw_uncore_pci_init(void); +void snb_uncore_cpu_init(void); +void nhm_uncore_cpu_init(void); + +/* perf_event_intel_uncore_snbep.c 
*/ +int snbep_uncore_pci_init(void); +void snbep_uncore_cpu_init(void); +int ivbep_uncore_pci_init(void); +void ivbep_uncore_cpu_init(void); + +/* perf_event_intel_uncore_nhmex.c */ +void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c new file mode 100644 index 000000000000..2749965afed0 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c @@ -0,0 +1,1221 @@ +/* Nehalem-EX/Westmere-EX uncore support */ +#include "perf_event_intel_uncore.h" + +/* NHM-EX event control */ +#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff +#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 +#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) +#define NHMEX_PMON_CTL_EDGE_DET (1 << 18) +#define NHMEX_PMON_CTL_PMI_EN (1 << 20) +#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) +#define NHMEX_PMON_CTL_INVERT (1 << 23) +#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 +#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ + NHMEX_PMON_CTL_UMASK_MASK | \ + NHMEX_PMON_CTL_EDGE_DET | \ + NHMEX_PMON_CTL_INVERT | \ + NHMEX_PMON_CTL_TRESH_MASK) + +/* NHM-EX Ubox */ +#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 +#define NHMEX_U_MSR_PMON_CTR 0xc11 +#define NHMEX_U_MSR_PMON_EV_SEL 0xc10 + +#define NHMEX_U_PMON_GLOBAL_EN (1 << 0) +#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e +#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) +#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) +#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) + +#define NHMEX_U_PMON_RAW_EVENT_MASK \ + (NHMEX_PMON_CTL_EV_SEL_MASK | \ + NHMEX_PMON_CTL_EDGE_DET) + +/* NHM-EX Cbox */ +#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 +#define NHMEX_C0_MSR_PMON_CTR0 0xd11 +#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 +#define NHMEX_C_MSR_OFFSET 0x20 + +/* NHM-EX Bbox */ +#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 +#define NHMEX_B0_MSR_PMON_CTR0 0xc31 +#define NHMEX_B0_MSR_PMON_CTL0 0xc30 +#define NHMEX_B_MSR_OFFSET 0x40 +#define NHMEX_B0_MSR_MATCH 0xe45 +#define NHMEX_B0_MSR_MASK 0xe46 +#define NHMEX_B1_MSR_MATCH 0xe4d +#define NHMEX_B1_MSR_MASK 0xe4e + +#define NHMEX_B_PMON_CTL_EN (1 << 0) +#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 +#define NHMEX_B_PMON_CTL_EV_SEL_MASK \ + (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) +#define NHMEX_B_PMON_CTR_SHIFT 6 +#define NHMEX_B_PMON_CTR_MASK \ + (0x3 << NHMEX_B_PMON_CTR_SHIFT) +#define NHMEX_B_PMON_RAW_EVENT_MASK \ + (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ + NHMEX_B_PMON_CTR_MASK) + +/* NHM-EX Sbox */ +#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 +#define NHMEX_S0_MSR_PMON_CTR0 0xc51 +#define NHMEX_S0_MSR_PMON_CTL0 0xc50 +#define NHMEX_S_MSR_OFFSET 0x80 +#define NHMEX_S0_MSR_MM_CFG 0xe48 +#define NHMEX_S0_MSR_MATCH 0xe49 +#define NHMEX_S0_MSR_MASK 0xe4a +#define NHMEX_S1_MSR_MM_CFG 0xe58 +#define NHMEX_S1_MSR_MATCH 0xe59 +#define NHMEX_S1_MSR_MASK 0xe5a + +#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) +#define NHMEX_S_EVENT_TO_R_PROG_EV 0 + +/* NHM-EX Mbox */ +#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 +#define NHMEX_M0_MSR_PMU_DSP 0xca5 +#define NHMEX_M0_MSR_PMU_ISS 0xca6 +#define NHMEX_M0_MSR_PMU_MAP 0xca7 +#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 +#define NHMEX_M0_MSR_PMU_PGT 0xca9 +#define NHMEX_M0_MSR_PMU_PLD 0xcaa +#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab +#define NHMEX_M0_MSR_PMU_CTL0 0xcb0 +#define NHMEX_M0_MSR_PMU_CNT0 0xcb1 +#define NHMEX_M_MSR_OFFSET 0x40 +#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 +#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c + +#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) +#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL +#define 
NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL +#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 + +#define NHMEX_M_PMON_CTL_EN (1 << 0) +#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) +#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 +#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ + (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) +#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 +#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ + (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) +#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) +#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) +#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 +#define NHMEX_M_PMON_CTL_INC_SEL_MASK \ + (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) +#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 +#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ + (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) +#define NHMEX_M_PMON_RAW_EVENT_MASK \ + (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ + NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ + NHMEX_M_PMON_CTL_WRAP_MODE | \ + NHMEX_M_PMON_CTL_FLAG_MODE | \ + NHMEX_M_PMON_CTL_INC_SEL_MASK | \ + NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) + +#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) +#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) + +#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) +#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) + +/* + * Use bits 9~13 to select the event if the 7th bit is not set; + * otherwise use bits 19~21 to select the event. + */ +#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) +#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ + NHMEX_M_PMON_CTL_FLAG_MODE) +#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ + NHMEX_M_PMON_CTL_FLAG_MODE) +#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ + NHMEX_M_PMON_CTL_FLAG_MODE) +#define MBOX_INC_SEL_EXTAR_REG(c, r) \ + EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ + MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) +#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ + EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ + MBOX_SET_FLAG_SEL_MASK, \ + (u64)-1, NHMEX_M_##r) + +/* NHM-EX Rbox */ +#define NHMEX_R_MSR_GLOBAL_CTL 0xe00 +#define NHMEX_R_MSR_PMON_CTL0 0xe10 +#define NHMEX_R_MSR_PMON_CNT0 0xe11 +#define NHMEX_R_MSR_OFFSET 0x20 + +#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ + ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) +#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) +#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) +#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ + (((n) < 4 ?
0 : 0x10) + (n) * 4) +#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ + (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) +#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) +#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) +#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ + (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) +#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) +#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) + +#define NHMEX_R_PMON_CTL_EN (1 << 0) +#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 +#define NHMEX_R_PMON_CTL_EV_SEL_MASK \ + (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) +#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) +#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK + +/* NHM-EX Wbox */ +#define NHMEX_W_MSR_GLOBAL_CTL 0xc80 +#define NHMEX_W_MSR_PMON_CNT0 0xc90 +#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 +#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 +#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 + +#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) + +#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ + ((1ULL << (n)) - 1))) + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); +DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); +DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); + +static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) +{ + wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); +} + +static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + u64 config; + + if (msr) { + rdmsrl(msr, config); + config &= ~((1ULL << uncore_num_counters(box)) - 1); + /* WBox has a fixed counter */ + if (uncore_msr_fixed_ctl(box)) + config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; + wrmsrl(msr, config); + } +} + +static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + u64 config; + + if (msr) { + rdmsrl(msr, config); + config |= (1ULL << uncore_num_counters(box)) - 1; + /* WBox has a fixed counter */ + if (uncore_msr_fixed_ctl(box)) + config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; + wrmsrl(msr, config); + } +} + +static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx >= UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); + else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); + else + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); +} + +#define NHMEX_UNCORE_OPS_COMMON_INIT() \ + .init_box = nhmex_uncore_msr_init_box, \ + .disable_box = nhmex_uncore_msr_disable_box, \ + .enable_box = nhmex_uncore_msr_enable_box, \ + .disable_event = nhmex_uncore_msr_disable_event, \ + .read_counter = uncore_msr_read_counter + +static struct intel_uncore_ops nhmex_uncore_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + 
.enable_event = nhmex_uncore_msr_enable_event, +}; + +static struct attribute *nhmex_uncore_ubox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_edge.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_ubox_format_group = { + .name = "format", + .attrs = nhmex_uncore_ubox_formats_attr, +}; + +static struct intel_uncore_type nhmex_uncore_ubox = { + .name = "ubox", + .num_counters = 1, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, + .perf_ctr = NHMEX_U_MSR_PMON_CTR, + .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, + .ops = &nhmex_uncore_ops, + .format_group = &nhmex_uncore_ubox_format_group +}; + +static struct attribute *nhmex_uncore_cbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_cbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_cbox_formats_attr, +}; + +/* msr offset for each instance of cbox */ +static unsigned nhmex_cbox_msr_offsets[] = { + 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, +}; + +static struct intel_uncore_type nhmex_uncore_cbox = { + .name = "cbox", + .num_counters = 6, + .num_boxes = 10, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, + .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, + .event_mask = NHMEX_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, + .msr_offsets = nhmex_cbox_msr_offsets, + .pair_ctr_ctl = 1, + .ops = &nhmex_uncore_ops, + .format_group = &nhmex_uncore_cbox_format_group +}; + +static struct uncore_event_desc nhmex_uncore_wbox_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_type nhmex_uncore_wbox = { + .name = "wbox", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_W_MSR_PMON_CNT0, + .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, + .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, + .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, + .event_mask = NHMEX_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, + .pair_ctr_ctl = 1, + .event_descs = nhmex_uncore_wbox_events, + .ops = &nhmex_uncore_ops, + .format_group = &nhmex_uncore_cbox_format_group +}; + +static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + int ctr, ev_sel; + + ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> + NHMEX_B_PMON_CTR_SHIFT; + ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> + NHMEX_B_PMON_CTL_EV_SEL_SHIFT; + + /* events that do not use the match/mask registers */ + if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || + (ctr == 2 && ev_sel != 0x4) || ctr == 3) + return 0; + + if (box->pmu->pmu_idx == 0) + reg1->reg = NHMEX_B0_MSR_MATCH; + else + reg1->reg = NHMEX_B1_MSR_MATCH; + reg1->idx = 0; + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; + return 0; +} + +static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + wrmsrl(reg1->reg, reg1->config); + wrmsrl(reg1->reg + 1, reg2->config); + } + 
wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | + (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); +} + +/* + * The Bbox has 4 counters, but each counter monitors different events. + * Use bits 6-7 in the event config to select counter. + */ +static struct event_constraint nhmex_uncore_bbox_constraints[] = { + EVENT_CONSTRAINT(0 , 1, 0xc0), + EVENT_CONSTRAINT(0x40, 2, 0xc0), + EVENT_CONSTRAINT(0x80, 4, 0xc0), + EVENT_CONSTRAINT(0xc0, 8, 0xc0), + EVENT_CONSTRAINT_END, +}; + +static struct attribute *nhmex_uncore_bbox_formats_attr[] = { + &format_attr_event5.attr, + &format_attr_counter.attr, + &format_attr_match.attr, + &format_attr_mask.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_bbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_bbox_formats_attr, +}; + +static struct intel_uncore_ops nhmex_uncore_bbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_bbox_msr_enable_event, + .hw_config = nhmex_bbox_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + +static struct intel_uncore_type nhmex_uncore_bbox = { + .name = "bbox", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_B0_MSR_PMON_CTL0, + .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, + .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, + .msr_offset = NHMEX_B_MSR_OFFSET, + .pair_ctr_ctl = 1, + .num_shared_regs = 1, + .constraints = nhmex_uncore_bbox_constraints, + .ops = &nhmex_uncore_bbox_ops, + .format_group = &nhmex_uncore_bbox_format_group +}; + +static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + /* only TO_R_PROG_EV event uses the match/mask register */ + if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != + NHMEX_S_EVENT_TO_R_PROG_EV) + return 0; + + if (box->pmu->pmu_idx == 0) + reg1->reg = NHMEX_S0_MSR_MM_CFG; + else + reg1->reg = NHMEX_S1_MSR_MM_CFG; + reg1->idx = 0; + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; + return 0; +} + +static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + wrmsrl(reg1->reg, 0); + wrmsrl(reg1->reg + 1, reg1->config); + wrmsrl(reg1->reg + 2, reg2->config); + wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); + } + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); +} + +static struct attribute *nhmex_uncore_sbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + &format_attr_match.attr, + &format_attr_mask.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_sbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_sbox_formats_attr, +}; + +static struct intel_uncore_ops nhmex_uncore_sbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_sbox_msr_enable_event, + .hw_config = nhmex_sbox_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + +static struct intel_uncore_type nhmex_uncore_sbox = { + .name = "sbox", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = 
NHMEX_S0_MSR_PMON_CTL0, + .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, + .event_mask = NHMEX_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, + .msr_offset = NHMEX_S_MSR_OFFSET, + .pair_ctr_ctl = 1, + .num_shared_regs = 1, + .ops = &nhmex_uncore_sbox_ops, + .format_group = &nhmex_uncore_sbox_format_group +}; + +enum { + EXTRA_REG_NHMEX_M_FILTER, + EXTRA_REG_NHMEX_M_DSP, + EXTRA_REG_NHMEX_M_ISS, + EXTRA_REG_NHMEX_M_MAP, + EXTRA_REG_NHMEX_M_MSC_THR, + EXTRA_REG_NHMEX_M_PGT, + EXTRA_REG_NHMEX_M_PLD, + EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, +}; + +static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { + MBOX_INC_SEL_EXTAR_REG(0x0, DSP), + MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), + MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), + MBOX_INC_SEL_EXTAR_REG(0x9, ISS), + /* event 0xa uses two extra registers */ + MBOX_INC_SEL_EXTAR_REG(0xa, ISS), + MBOX_INC_SEL_EXTAR_REG(0xa, PLD), + MBOX_INC_SEL_EXTAR_REG(0xb, PLD), + /* events 0xd ~ 0x10 use the same extra register */ + MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0x16, PGT), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), + EVENT_EXTRA_END +}; + +/* Nehalem-EX or Westmere-EX ? */ +static bool uncore_nhmex; + +static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) +{ + struct intel_uncore_extra_reg *er; + unsigned long flags; + bool ret = false; + u64 mask; + + if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { + er = &box->shared_regs[idx]; + raw_spin_lock_irqsave(&er->lock, flags); + if (!atomic_read(&er->ref) || er->config == config) { + atomic_inc(&er->ref); + er->config = config; + ret = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + return ret; + } + /* + * The ZDP_CTL_FVC MSR has 4 fields which are used to control + * events 0xd ~ 0x10. Besides these 4 fields, there are additional + * fields which are shared. 
+ */ + idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + if (WARN_ON_ONCE(idx >= 4)) + return false; + + /* mask of the shared fields */ + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; + er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; + + raw_spin_lock_irqsave(&er->lock, flags); + /* add mask of the non-shared field if it's in use */ + if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { + if (uncore_nhmex) + mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + } + + if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { + atomic_add(1 << (idx * 8), &er->ref); + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | + NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | + WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + er->config &= ~mask; + er->config |= (config & mask); + ret = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + return ret; +} + +static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) +{ + struct intel_uncore_extra_reg *er; + + if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { + er = &box->shared_regs[idx]; + atomic_dec(&er->ref); + return; + } + + idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; + atomic_sub(1 << (idx * 8), &er->ref); +} + +static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); + u64 config = reg1->config; + + /* get the non-shared control bits and shift them */ + idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + if (uncore_nhmex) + config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (new_idx > orig_idx) { + idx = new_idx - orig_idx; + config <<= 3 * idx; + } else { + idx = orig_idx - new_idx; + config >>= 3 * idx; + } + + /* add the shared control bits back */ + if (uncore_nhmex) + config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + else + config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + if (modify) { + /* adjust the main event selector */ + if (new_idx > orig_idx) + hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; + else + hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; + reg1->config = config; + reg1->idx = ~0xff | new_idx; + } + return config; +} + +static struct event_constraint * +nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + int i, idx[2], alloc = 0; + u64 config1 = reg1->config; + + idx[0] = __BITS_VALUE(reg1->idx, 0, 8); + idx[1] = __BITS_VALUE(reg1->idx, 1, 8); +again: + for (i = 0; i < 2; i++) { + if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) + idx[i] = 0xff; + + if (idx[i] == 0xff) + continue; + + if (!nhmex_mbox_get_shared_reg(box, idx[i], + __BITS_VALUE(config1, i, 32))) + goto fail; + alloc |= (0x1 << i); + } + + /* for the match/mask registers */ + if (reg2->idx != EXTRA_REG_NONE && + (uncore_box_is_fake(box) || !reg2->alloc) && + !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) + goto fail; + + /* + * If it's a fake box -- as per validate_{group,event}() we + * shouldn't touch event state and we can avoid 
doing so + * since both will only call get_event_constraints() once + * on each event, this avoids the need for reg->alloc. + */ + if (!uncore_box_is_fake(box)) { + if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) + nhmex_mbox_alter_er(event, idx[0], true); + reg1->alloc |= alloc; + if (reg2->idx != EXTRA_REG_NONE) + reg2->alloc = 1; + } + return NULL; +fail: + if (idx[0] != 0xff && !(alloc & 0x1) && + idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { + /* + * events 0xd ~ 0x10 are functionally identical, but are + * controlled by different fields in the ZDP_CTL_FVC + * register. If we failed to take one field, try the + * remaining 3 choices. + */ + BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); + idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + idx[0] = (idx[0] + 1) % 4; + idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { + config1 = nhmex_mbox_alter_er(event, idx[0], false); + goto again; + } + } + + if (alloc & 0x1) + nhmex_mbox_put_shared_reg(box, idx[0]); + if (alloc & 0x2) + nhmex_mbox_put_shared_reg(box, idx[1]); + return &uncore_constraint_empty; +} + +static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + + if (uncore_box_is_fake(box)) + return; + + if (reg1->alloc & 0x1) + nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); + if (reg1->alloc & 0x2) + nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); + reg1->alloc = 0; + + if (reg2->alloc) { + nhmex_mbox_put_shared_reg(box, reg2->idx); + reg2->alloc = 0; + } +} + +static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) +{ + if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) + return er->idx; + return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; +} + +static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct intel_uncore_type *type = box->pmu->type; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + struct extra_reg *er; + unsigned msr; + int reg_idx = 0; + /* + * The mbox events may require at most 2 extra MSRs. But only + * the lower 32 bits in these MSRs are significant, so we can use + * config1 to pass two MSRs' config. + */ + for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { + if (er->event != (event->hw.config & er->config_mask)) + continue; + if (event->attr.config1 & ~er->valid_mask) + return -EINVAL; + + msr = er->msr + type->msr_offset * box->pmu->pmu_idx; + if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) + return -EINVAL; + + /* always use the 32~63 bits to pass the PLD config */ + if (er->idx == EXTRA_REG_NHMEX_M_PLD) + reg_idx = 1; + else if (WARN_ON_ONCE(reg_idx > 0)) + return -EINVAL; + + reg1->idx &= ~(0xff << (reg_idx * 8)); + reg1->reg &= ~(0xffff << (reg_idx * 16)); + reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); + reg1->reg |= msr << (reg_idx * 16); + reg1->config = event->attr.config1; + reg_idx++; + } + /* + * The mbox only provides the ability to perform address matching + * for the PLD events.
+ */ + if (reg_idx == 2) { + reg2->idx = EXTRA_REG_NHMEX_M_FILTER; + if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) + reg2->config = event->attr.config2; + else + reg2->config = ~0ULL; + if (box->pmu->pmu_idx == 0) + reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; + else + reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; + } + return 0; +} + +static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) +{ + struct intel_uncore_extra_reg *er; + unsigned long flags; + u64 config; + + if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) + return box->shared_regs[idx].config; + + er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; + raw_spin_lock_irqsave(&er->lock, flags); + config = er->config; + raw_spin_unlock_irqrestore(&er->lock, flags); + return config; +} + +static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + int idx; + + idx = __BITS_VALUE(reg1->idx, 0, 8); + if (idx != 0xff) + wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), + nhmex_mbox_shared_reg_config(box, idx)); + idx = __BITS_VALUE(reg1->idx, 1, 8); + if (idx != 0xff) + wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), + nhmex_mbox_shared_reg_config(box, idx)); + + if (reg2->idx != EXTRA_REG_NONE) { + wrmsrl(reg2->reg, 0); + if (reg2->config != ~0ULL) { + wrmsrl(reg2->reg + 1, + reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); + wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & + (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); + wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + } + } + + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); +} + +DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); +DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); +DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); +DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); +DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); +DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); +DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); +DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); +DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); +DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); + +static struct attribute *nhmex_uncore_mbox_formats_attr[] = { + &format_attr_count_mode.attr, + &format_attr_storage_mode.attr, + &format_attr_wrap_mode.attr, + &format_attr_flag_mode.attr, + &format_attr_inc_sel.attr, + &format_attr_set_flag_sel.attr, + &format_attr_filter_cfg_en.attr, + &format_attr_filter_match.attr, + &format_attr_filter_mask.attr, + &format_attr_dsp.attr, + &format_attr_thr.attr, + &format_attr_fvc.attr, + &format_attr_pgt.attr, + &format_attr_map.attr, + &format_attr_iss.attr, + &format_attr_pld.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_mbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_mbox_formats_attr, +}; + +static struct uncore_event_desc nhmex_uncore_mbox_events[] = { + INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), + INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, 
"inc_sel=0xd,fvc=0x2820"), + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc wsmex_uncore_mbox_events[] = { + INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), + INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_ops nhmex_uncore_mbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_mbox_msr_enable_event, + .hw_config = nhmex_mbox_hw_config, + .get_constraint = nhmex_mbox_get_constraint, + .put_constraint = nhmex_mbox_put_constraint, +}; + +static struct intel_uncore_type nhmex_uncore_mbox = { + .name = "mbox", + .num_counters = 6, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_M0_MSR_PMU_CTL0, + .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, + .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, + .msr_offset = NHMEX_M_MSR_OFFSET, + .pair_ctr_ctl = 1, + .num_shared_regs = 8, + .event_descs = nhmex_uncore_mbox_events, + .ops = &nhmex_uncore_mbox_ops, + .format_group = &nhmex_uncore_mbox_format_group, +}; + +static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + + /* adjust the main event selector and extra register index */ + if (reg1->idx % 2) { + reg1->idx--; + hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; + } else { + reg1->idx++; + hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; + } + + /* adjust extra register config */ + switch (reg1->idx % 6) { + case 2: + /* shift the 8~15 bits to the 0~7 bits */ + reg1->config >>= 8; + break; + case 3: + /* shift the 0~7 bits to the 8~15 bits */ + reg1->config <<= 8; + break; + } +} + +/* + * Each rbox has 4 event set which monitor PQI port 0~3 or 4~7. + * An event set consists of 6 events, the 3rd and 4th events in + * an event set use the same extra register. So an event set uses + * 5 extra registers. + */ +static struct event_constraint * +nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + struct intel_uncore_extra_reg *er; + unsigned long flags; + int idx, er_idx; + u64 config1; + bool ok = false; + + if (!uncore_box_is_fake(box) && reg1->alloc) + return NULL; + + idx = reg1->idx % 6; + config1 = reg1->config; +again: + er_idx = idx; + /* the 3rd and 4th events use the same extra register */ + if (er_idx > 2) + er_idx--; + er_idx += (reg1->idx / 6) * 5; + + er = &box->shared_regs[er_idx]; + raw_spin_lock_irqsave(&er->lock, flags); + if (idx < 2) { + if (!atomic_read(&er->ref) || er->config == reg1->config) { + atomic_inc(&er->ref); + er->config = reg1->config; + ok = true; + } + } else if (idx == 2 || idx == 3) { + /* + * these two events use different fields in a extra register, + * the 0~7 bits and the 8~15 bits respectively. 
+ */ + u64 mask = 0xff << ((idx - 2) * 8); + if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || + !((er->config ^ config1) & mask)) { + atomic_add(1 << ((idx - 2) * 8), &er->ref); + er->config &= ~mask; + er->config |= config1 & mask; + ok = true; + } + } else { + if (!atomic_read(&er->ref) || + (er->config == (hwc->config >> 32) && + er->config1 == reg1->config && + er->config2 == reg2->config)) { + atomic_inc(&er->ref); + er->config = (hwc->config >> 32); + er->config1 = reg1->config; + er->config2 = reg2->config; + ok = true; + } + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + if (!ok) { + /* + * The Rbox events are always in pairs. The paired + * events are functionally identical, but use different + * extra registers. If we failed to take an extra + * register, try the alternative. + */ + idx ^= 1; + if (idx != reg1->idx % 6) { + if (idx == 2) + config1 >>= 8; + else if (idx == 3) + config1 <<= 8; + goto again; + } + } else { + if (!uncore_box_is_fake(box)) { + if (idx != reg1->idx % 6) + nhmex_rbox_alter_er(box, event); + reg1->alloc = 1; + } + return NULL; + } + return &uncore_constraint_empty; +} + +static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct intel_uncore_extra_reg *er; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + int idx, er_idx; + + if (uncore_box_is_fake(box) || !reg1->alloc) + return; + + idx = reg1->idx % 6; + er_idx = idx; + if (er_idx > 2) + er_idx--; + er_idx += (reg1->idx / 6) * 5; + + er = &box->shared_regs[er_idx]; + if (idx == 2 || idx == 3) + atomic_sub(1 << ((idx - 2) * 8), &er->ref); + else + atomic_dec(&er->ref); + + reg1->alloc = 0; +} + +static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + int idx; + + idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> + NHMEX_R_PMON_CTL_EV_SEL_SHIFT; + if (idx >= 0x18) + return -EINVAL; + + reg1->idx = idx; + reg1->config = event->attr.config1; + + switch (idx % 6) { + case 4: + case 5: + hwc->config |= event->attr.config & (~0ULL << 32); + reg2->config = event->attr.config2; + break; + } + return 0; +} + +static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + int idx, port; + + idx = reg1->idx; + port = idx / 6 + box->pmu->pmu_idx * 4; + + switch (idx % 6) { + case 0: + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); + break; + case 1: + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); + break; + case 2: + case 3: + wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), + uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); + break; + case 4: + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); + break; + case 5: + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); + break; + } + + wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | + (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); +} + +DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); +DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); + +static struct attribute *nhmex_uncore_rbox_formats_attr[] = { + &format_attr_event5.attr, + &format_attr_xbr_mm_cfg.attr, + &format_attr_xbr_match.attr, + &format_attr_xbr_mask.attr, + &format_attr_qlx_cfg.attr, + &format_attr_iperf_cfg.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_rbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_rbox_formats_attr, +}; + +static struct uncore_event_desc nhmex_uncore_rbox_events[] = { + INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), + INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"), + INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), + INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), + INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"), + INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_ops nhmex_uncore_rbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_rbox_msr_enable_event, + .hw_config = nhmex_rbox_hw_config, + .get_constraint = nhmex_rbox_get_constraint, + .put_constraint = nhmex_rbox_put_constraint, +}; + +static struct intel_uncore_type nhmex_uncore_rbox = { + .name = "rbox", + .num_counters = 8, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_R_MSR_PMON_CTL0, + .perf_ctr = NHMEX_R_MSR_PMON_CNT0, + .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, + .msr_offset = NHMEX_R_MSR_OFFSET, + .pair_ctr_ctl = 1, + .num_shared_regs = 20, + .event_descs = nhmex_uncore_rbox_events, + .ops = &nhmex_uncore_rbox_ops, + .format_group = &nhmex_uncore_rbox_format_group +}; + +static struct intel_uncore_type *nhmex_msr_uncores[] = { + &nhmex_uncore_ubox, + &nhmex_uncore_cbox, + &nhmex_uncore_bbox, + &nhmex_uncore_sbox, + &nhmex_uncore_mbox, + &nhmex_uncore_rbox, + &nhmex_uncore_wbox, + NULL, +}; + +void nhmex_uncore_cpu_init(void) +{ + if (boot_cpu_data.x86_model == 46) + uncore_nhmex = true; + else + nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; + if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) + nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + uncore_msr_uncores = nhmex_msr_uncores; +} +/* end of Nehalem-EX uncore support */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c new file mode 100644 index 000000000000..e0e934c8ee77 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c @@ -0,0 +1,603 @@ +/* Nehalem/Sandy Bridge/Haswell uncore support */ +#include "perf_event_intel_uncore.h" + +/* SNB event control */ +#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff +#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 +#define SNB_UNC_CTL_EDGE_DET (1 << 18) +#define SNB_UNC_CTL_EN (1 << 22) +#define SNB_UNC_CTL_INVERT (1 << 23) +#define SNB_UNC_CTL_CMASK_MASK 0x1f000000 +#define NHM_UNC_CTL_CMASK_MASK 0xff000000 +#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ + SNB_UNC_CTL_UMASK_MASK | \ + SNB_UNC_CTL_EDGE_DET | \ + SNB_UNC_CTL_INVERT | \ + SNB_UNC_CTL_CMASK_MASK) + +#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ + SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \ + SNB_UNC_CTL_INVERT | \ + NHM_UNC_CTL_CMASK_MASK) + +/* SNB global control register */ +#define SNB_UNC_PERF_GLOBAL_CTL 0x391 +#define SNB_UNC_FIXED_CTR_CTRL 0x394 +#define SNB_UNC_FIXED_CTR 0x395 + +/* SNB uncore global control */ +#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) +#define SNB_UNC_GLOBAL_CTL_EN (1 << 29) + +/* SNB Cbo register */ +#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 +#define SNB_UNC_CBO_0_PER_CTR0 0x706 +#define SNB_UNC_CBO_MSR_OFFSET 0x10 + +/* NHM global control register */ +#define NHM_UNC_PERF_GLOBAL_CTL 0x391 +#define NHM_UNC_FIXED_CTR 0x394 +#define NHM_UNC_FIXED_CTR_CTRL 0x395 + +/* NHM uncore global control */ +#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) +#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) + +/* NHM uncore register */ +#define NHM_UNC_PERFEVTSEL0 0x3c0 +#define NHM_UNC_UNCORE_PMC0 0x3b0 + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); +DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); + +/* Sandy Bridge uncore support */ +static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); +} + +static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void snb_uncore_msr_init_box(struct intel_uncore_box *box) +{ + if (box->pmu->pmu_idx == 0) { + wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, + SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); + } +} + +static struct uncore_event_desc snb_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), + { /* end: all zeroes */ }, +}; + +static struct attribute *snb_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask5.attr, + NULL, +}; + +static struct attribute_group snb_uncore_format_group = { + .name = "format", + .attrs = snb_uncore_formats_attr, +}; + +static struct intel_uncore_ops snb_uncore_msr_ops = { + .init_box = snb_uncore_msr_init_box, + .disable_event = snb_uncore_msr_disable_event, + .enable_event = snb_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct event_constraint snb_uncore_cbox_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x80, 0x1), + UNCORE_EVENT_CONSTRAINT(0x83, 0x1), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type snb_uncore_cbox = { + .name = "cbox", + .num_counters = 2, + .num_boxes = 4, + .perf_ctr_bits = 44, + .fixed_ctr_bits = 48, + .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, + .fixed_ctr = SNB_UNC_FIXED_CTR, + .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, + .single_fixed = 1, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = SNB_UNC_CBO_MSR_OFFSET, + .constraints = snb_uncore_cbox_constraints, + .ops = &snb_uncore_msr_ops, + .format_group = &snb_uncore_format_group, + .event_descs = snb_uncore_events, +}; + +static struct intel_uncore_type *snb_msr_uncores[] = { + &snb_uncore_cbox, + NULL, +}; + +void snb_uncore_cpu_init(void) +{ + uncore_msr_uncores = snb_msr_uncores; + if (snb_uncore_cbox.num_boxes > 
boot_cpu_data.x86_max_cores) + snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; +} + +enum { + SNB_PCI_UNCORE_IMC, +}; + +static struct uncore_event_desc snb_uncore_imc_events[] = { + INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"), + INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"), + + INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"), + INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"), + + { /* end: all zeroes */ }, +}; + +#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff +#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48 + +/* page size multiple covering all config regs */ +#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000 + +#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1 +#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050 +#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2 +#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 +#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE + +static struct attribute *snb_uncore_imc_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group snb_uncore_imc_format_group = { + .name = "format", + .attrs = snb_uncore_imc_formats_attr, +}; + +static void snb_uncore_imc_init_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET; + resource_size_t addr; + u32 pci_dword; + + pci_read_config_dword(pdev, where, &pci_dword); + addr = pci_dword; + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + pci_read_config_dword(pdev, where + 4, &pci_dword); + addr |= ((resource_size_t)pci_dword << 32); +#endif + + addr &= ~(PAGE_SIZE - 1); + + box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE); + box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; +} + +static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) +{} + +static void snb_uncore_imc_disable_box(struct intel_uncore_box *box) +{} + +static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{} + +static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{} + +static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + return (u64)*(unsigned int *)(box->io_addr + hwc->event_base); +} + +/* + * custom event_init() function because we define our own fixed, free + * running counters, so we do not want to conflict with generic uncore + * logic. 
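+ * Each event maps onto one fixed, free running counter, so there is no counter scheduling to do and sampling is not supported.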
This also simplifies event processing. + */ +static int snb_uncore_imc_event_init(struct perf_event *event) +{ + struct intel_uncore_pmu *pmu; + struct intel_uncore_box *box; + struct hw_perf_event *hwc = &event->hw; + u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; + int idx, base; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + pmu = uncore_event_to_pmu(event); + /* no device found for this pmu */ + if (pmu->func_id < 0) + return -ENOENT; + + /* Sampling not supported yet */ + if (hwc->sample_period) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + /* + * Place all uncore events for a particular physical package + * onto a single cpu + */ + if (event->cpu < 0) + return -EINVAL; + + /* check only supported bits are set */ + if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) + return -EINVAL; + + box = uncore_pmu_to_box(pmu, event->cpu); + if (!box || box->cpu < 0) + return -EINVAL; + + event->cpu = box->cpu; + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + /* + * check event is known (whitelist, determines counter) + */ + switch (cfg) { + case SNB_UNCORE_PCI_IMC_DATA_READS: + base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; + idx = UNCORE_PMC_IDX_FIXED; + break; + case SNB_UNCORE_PCI_IMC_DATA_WRITES: + base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; + idx = UNCORE_PMC_IDX_FIXED + 1; + break; + default: + return -EINVAL; + } + + /* must be done before validate_group */ + event->hw.event_base = base; + event->hw.config = cfg; + event->hw.idx = idx; + + /* no group validation needed, we have free running counters */ + + return 0; +} + +static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + return 0; +} + +static void snb_uncore_imc_event_start(struct perf_event *event, int flags) +{ + struct intel_uncore_box *box = uncore_event_to_box(event); + u64 count; + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + box->n_active++; + + list_add_tail(&event->active_entry, &box->active_list); + + count = snb_uncore_imc_read_counter(box, event); + local64_set(&event->hw.prev_count, count); + + if (box->n_active == 1) + uncore_pmu_start_hrtimer(box); +} + +static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) +{ + struct intel_uncore_box *box = uncore_event_to_box(event); + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + box->n_active--; + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + list_del(&event->active_entry); + + if (box->n_active == 0) + uncore_pmu_cancel_hrtimer(box); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of an event + * that we are disabling: + */ + uncore_perf_event_update(box, event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +static int snb_uncore_imc_event_add(struct perf_event *event, int flags) +{ + struct intel_uncore_box *box = uncore_event_to_box(event); + struct hw_perf_event *hwc = &event->hw; + + if (!box) + return -ENODEV; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + +
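+ /* counters are free running, so the event is started unconditionally */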
snb_uncore_imc_event_start(event, 0); + + box->n_events++; + + return 0; +} + +static void snb_uncore_imc_event_del(struct perf_event *event, int flags) +{ + struct intel_uncore_box *box = uncore_event_to_box(event); + int i; + + snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < box->n_events; i++) { + if (event == box->event_list[i]) { + --box->n_events; + break; + } + } +} + +static int snb_pci2phy_map_init(int devid) +{ + struct pci_dev *dev = NULL; + int bus; + + dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev); + if (!dev) + return -ENOTTY; + + bus = dev->bus->number; + + uncore_pcibus_to_physid[bus] = 0; + + pci_dev_put(dev); + + return 0; +} + +static struct pmu snb_uncore_imc_pmu = { + .task_ctx_nr = perf_invalid_context, + .event_init = snb_uncore_imc_event_init, + .add = snb_uncore_imc_event_add, + .del = snb_uncore_imc_event_del, + .start = snb_uncore_imc_event_start, + .stop = snb_uncore_imc_event_stop, + .read = uncore_pmu_event_read, +}; + +static struct intel_uncore_ops snb_uncore_imc_ops = { + .init_box = snb_uncore_imc_init_box, + .enable_box = snb_uncore_imc_enable_box, + .disable_box = snb_uncore_imc_disable_box, + .disable_event = snb_uncore_imc_disable_event, + .enable_event = snb_uncore_imc_enable_event, + .hw_config = snb_uncore_imc_hw_config, + .read_counter = snb_uncore_imc_read_counter, +}; + +static struct intel_uncore_type snb_uncore_imc = { + .name = "imc", + .num_counters = 2, + .num_boxes = 1, + .fixed_ctr_bits = 32, + .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE, + .event_descs = snb_uncore_imc_events, + .format_group = &snb_uncore_imc_format_group, + .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE, + .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK, + .ops = &snb_uncore_imc_ops, + .pmu = &snb_uncore_imc_pmu, +}; + +static struct intel_uncore_type *snb_pci_uncores[] = { + [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc, + NULL, +}; + +static const struct pci_device_id snb_uncore_pci_ids[] = { + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* end: all zeroes */ }, +}; + +static const struct pci_device_id ivb_uncore_pci_ids[] = { + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* end: all zeroes */ }, +}; + +static const struct pci_device_id hsw_uncore_pci_ids[] = { + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* end: all zeroes */ }, +}; + +static struct pci_driver snb_uncore_pci_driver = { + .name = "snb_uncore", + .id_table = snb_uncore_pci_ids, +}; + +static struct pci_driver ivb_uncore_pci_driver = { + .name = "ivb_uncore", + .id_table = ivb_uncore_pci_ids, +}; + +static struct pci_driver hsw_uncore_pci_driver = { + .name = "hsw_uncore", + .id_table = hsw_uncore_pci_ids, +}; + +int snb_uncore_pci_init(void) +{ + int ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC); + if (ret) + return ret; + uncore_pci_uncores = snb_pci_uncores; + uncore_pci_driver = &snb_uncore_pci_driver; + return 0; +} + +int ivb_uncore_pci_init(void) +{ + int ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC); + if (ret) + return ret; + uncore_pci_uncores = snb_pci_uncores; + uncore_pci_driver = &ivb_uncore_pci_driver; + return 0; +} + +int hsw_uncore_pci_init(void) +{ + int ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC); + if (ret) + return ret; + 
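+ /* Haswell clients reuse the SNB IMC uncore description */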
uncore_pci_uncores = snb_pci_uncores; + uncore_pci_driver = &hsw_uncore_pci_driver; + return 0; +} + +/* end of Sandy Bridge uncore support */ + +/* Nehalem uncore support */ +static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); +} + +static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); +} + +static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *nhm_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask8.attr, + NULL, +}; + +static struct attribute_group nhm_uncore_format_group = { + .name = "format", + .attrs = nhm_uncore_formats_attr, +}; + +static struct uncore_event_desc nhm_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), + INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), + INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), + INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), + INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), + INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), + INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), + INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), + INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_ops nhm_uncore_msr_ops = { + .disable_box = nhm_uncore_msr_disable_box, + .enable_box = nhm_uncore_msr_enable_box, + .disable_event = snb_uncore_msr_disable_event, + .enable_event = nhm_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct intel_uncore_type nhm_uncore = { + .name = "", + .num_counters = 8, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = NHM_UNC_PERFEVTSEL0, + .perf_ctr = NHM_UNC_UNCORE_PMC0, + .fixed_ctr = NHM_UNC_FIXED_CTR, + .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, + .event_mask = NHM_UNC_RAW_EVENT_MASK, + .event_descs = nhm_uncore_events, + .ops = &nhm_uncore_msr_ops, + .format_group = &nhm_uncore_format_group, +}; + +static struct intel_uncore_type *nhm_msr_uncores[] = { + &nhm_uncore, + NULL, +}; + +void nhm_uncore_cpu_init(void) +{ + uncore_msr_uncores = nhm_msr_uncores; +} + +/* end of Nehalem uncore support */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c new file mode 100644 index 000000000000..6606ed05d311 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -0,0 +1,1645 @@ +/* SandyBridge-EP/IvyTown uncore support */ +#include "perf_event_intel_uncore.h" + + +/* SNB-EP Box level control */ +#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8) +#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16) +#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ + SNBEP_PMON_BOX_CTL_RST_CTRS | \ + SNBEP_PMON_BOX_CTL_FRZ_EN) +/* SNB-EP event control */ +#define SNBEP_PMON_CTL_EV_SEL_MASK 
0x000000ff +#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 +#define SNBEP_PMON_CTL_RST (1 << 17) +#define SNBEP_PMON_CTL_EDGE_DET (1 << 18) +#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) +#define SNBEP_PMON_CTL_EN (1 << 22) +#define SNBEP_PMON_CTL_INVERT (1 << 23) +#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 +#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_UMASK_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PMON_CTL_INVERT | \ + SNBEP_PMON_CTL_TRESH_MASK) + +/* SNB-EP Ubox event control */ +#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000 +#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_UMASK_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PMON_CTL_INVERT | \ + SNBEP_U_MSR_PMON_CTL_TRESH_MASK) + +#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19) +#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ + SNBEP_CBO_PMON_CTL_TID_EN) + +/* SNB-EP PCU event control */ +#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000 +#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000 +#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30) +#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31) +#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PMON_CTL_EV_SEL_EXT | \ + SNBEP_PMON_CTL_INVERT | \ + SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) + +#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_RAW_EVENT_MASK | \ + SNBEP_PMON_CTL_EV_SEL_EXT) + +/* SNB-EP pci control register */ +#define SNBEP_PCI_PMON_BOX_CTL 0xf4 +#define SNBEP_PCI_PMON_CTL0 0xd8 +/* SNB-EP pci counter register */ +#define SNBEP_PCI_PMON_CTR0 0xa0 + +/* SNB-EP home agent register */ +#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40 +#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44 +#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48 +/* SNB-EP memory controller register */ +#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0 +#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0 +/* SNB-EP QPI register */ +#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228 +#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c +#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238 +#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c + +/* SNB-EP Ubox register */ +#define SNBEP_U_MSR_PMON_CTR0 0xc16 +#define SNBEP_U_MSR_PMON_CTL0 0xc10 + +#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08 +#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09 + +/* SNB-EP Cbo register */ +#define SNBEP_C0_MSR_PMON_CTR0 0xd16 +#define SNBEP_C0_MSR_PMON_CTL0 0xd10 +#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 +#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 +#define SNBEP_CBO_MSR_OFFSET 0x20 + +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00 +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000 +#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000 + +#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \ + .event = (e), \ + .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \ + .config_mask = (m), \ + .idx = (i) \ +} + +/* SNB-EP PCU register */ +#define SNBEP_PCU_MSR_PMON_CTR0 0xc36 +#define SNBEP_PCU_MSR_PMON_CTL0 0xc30 +#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24 +#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34 +#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff +#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc +#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd + +/* IVBEP event control */ +#define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ + 
SNBEP_PMON_BOX_CTL_RST_CTRS) +#define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_UMASK_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PMON_CTL_TRESH_MASK) +/* IVBEP Ubox */ +#define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00 +#define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31) +#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29) + +#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_UMASK_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_U_MSR_PMON_CTL_TRESH_MASK) +/* IVBEP Cbo */ +#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \ + SNBEP_CBO_PMON_CTL_TID_EN) + +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) +#define IVBEP_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63) + +/* IVBEP home agent */ +#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16) +#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \ + (IVBEP_PMON_RAW_EVENT_MASK | \ + IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST) +/* IVBEP PCU */ +#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_CTL_EV_SEL_MASK | \ + SNBEP_PMON_CTL_EV_SEL_EXT | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ + SNBEP_PMON_CTL_EDGE_DET | \ + SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ + SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) +/* IVBEP QPI */ +#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ + (IVBEP_PMON_RAW_EVENT_MASK | \ + SNBEP_PMON_CTL_EV_SEL_EXT) + +#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ + ((1ULL << (n)) - 1))) + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); +DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); +DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); +DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); +DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); +DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); +DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); +DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); +DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); +DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); +DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); +DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); +DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); +DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); +DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); +DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); 
+DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); +DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); +DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); +DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); +DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); +DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); +DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); +DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); +DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); +DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); +DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); +DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); + +static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config |= SNBEP_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config &= ~SNBEP_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); + + return count; +} + +static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + + pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT); +} + +static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + u64 config; + unsigned msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config |= SNBEP_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + u64 config; + unsigned msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config &= ~SNBEP_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + + if (reg1->idx != EXTRA_REG_NONE) + wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); + + wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static void 
snbep_uncore_msr_disable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); +} + +static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + + if (msr) + wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); +} + +static struct attribute *snbep_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute *snbep_uncore_ubox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh5.attr, + NULL, +}; + +static struct attribute *snbep_uncore_cbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_tid_en.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + &format_attr_filter_tid.attr, + &format_attr_filter_nid.attr, + &format_attr_filter_state.attr, + &format_attr_filter_opc.attr, + NULL, +}; + +static struct attribute *snbep_uncore_pcu_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_occ_sel.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh5.attr, + &format_attr_occ_invert.attr, + &format_attr_occ_edge.attr, + &format_attr_filter_band0.attr, + &format_attr_filter_band1.attr, + &format_attr_filter_band2.attr, + &format_attr_filter_band3.attr, + NULL, +}; + +static struct attribute *snbep_uncore_qpi_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + &format_attr_match_rds.attr, + &format_attr_match_rnid30.attr, + &format_attr_match_rnid4.attr, + &format_attr_match_dnid.attr, + &format_attr_match_mc.attr, + &format_attr_match_opc.attr, + &format_attr_match_vnw.attr, + &format_attr_match0.attr, + &format_attr_match1.attr, + &format_attr_mask_rds.attr, + &format_attr_mask_rnid30.attr, + &format_attr_mask_rnid4.attr, + &format_attr_mask_dnid.attr, + &format_attr_mask_mc.attr, + &format_attr_mask_opc.attr, + &format_attr_mask_vnw.attr, + &format_attr_mask0.attr, + &format_attr_mask1.attr, + NULL, +}; + +static struct uncore_event_desc snbep_uncore_imc_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), + INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), + INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc snbep_uncore_qpi_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), + INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), + INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), + INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), + { /* end: all zeroes */ }, +}; + +static struct attribute_group snbep_uncore_format_group = { + .name = "format", + .attrs = snbep_uncore_formats_attr, +}; + +static struct attribute_group snbep_uncore_ubox_format_group = { + .name = "format", + .attrs = snbep_uncore_ubox_formats_attr, +}; + +static struct attribute_group snbep_uncore_cbox_format_group = { + .name = "format", + .attrs = snbep_uncore_cbox_formats_attr, +}; + +static struct attribute_group snbep_uncore_pcu_format_group = { + .name = "format", + .attrs = snbep_uncore_pcu_formats_attr, +}; + +static struct attribute_group snbep_uncore_qpi_format_group = { + 
.name = "format", + .attrs = snbep_uncore_qpi_formats_attr, +}; + +#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ + .init_box = snbep_uncore_msr_init_box, \ + .disable_box = snbep_uncore_msr_disable_box, \ + .enable_box = snbep_uncore_msr_enable_box, \ + .disable_event = snbep_uncore_msr_disable_event, \ + .enable_event = snbep_uncore_msr_enable_event, \ + .read_counter = uncore_msr_read_counter + +static struct intel_uncore_ops snbep_uncore_msr_ops = { + SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), +}; + +#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ + .init_box = snbep_uncore_pci_init_box, \ + .disable_box = snbep_uncore_pci_disable_box, \ + .enable_box = snbep_uncore_pci_enable_box, \ + .disable_event = snbep_uncore_pci_disable_event, \ + .read_counter = snbep_uncore_pci_read_counter + +static struct intel_uncore_ops snbep_uncore_pci_ops = { + SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), + .enable_event = snbep_uncore_pci_enable_event, \ +}; + +static struct event_constraint snbep_uncore_cbox_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x01, 0x1), + UNCORE_EVENT_CONSTRAINT(0x02, 0x3), + UNCORE_EVENT_CONSTRAINT(0x04, 0x3), + UNCORE_EVENT_CONSTRAINT(0x05, 0x3), + UNCORE_EVENT_CONSTRAINT(0x07, 0x3), + UNCORE_EVENT_CONSTRAINT(0x09, 0x3), + UNCORE_EVENT_CONSTRAINT(0x11, 0x1), + UNCORE_EVENT_CONSTRAINT(0x12, 0x3), + UNCORE_EVENT_CONSTRAINT(0x13, 0x3), + UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), + UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), + UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), + UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), + EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff), + UNCORE_EVENT_CONSTRAINT(0x21, 0x3), + UNCORE_EVENT_CONSTRAINT(0x23, 0x3), + UNCORE_EVENT_CONSTRAINT(0x31, 0x3), + UNCORE_EVENT_CONSTRAINT(0x32, 0x3), + UNCORE_EVENT_CONSTRAINT(0x33, 0x3), + UNCORE_EVENT_CONSTRAINT(0x34, 0x3), + UNCORE_EVENT_CONSTRAINT(0x35, 0x3), + UNCORE_EVENT_CONSTRAINT(0x36, 0x1), + UNCORE_EVENT_CONSTRAINT(0x37, 0x3), + UNCORE_EVENT_CONSTRAINT(0x38, 0x3), + UNCORE_EVENT_CONSTRAINT(0x39, 0x3), + UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), + EVENT_CONSTRAINT_END +}; + +static struct event_constraint snbep_uncore_r2pcie_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x10, 0x3), + UNCORE_EVENT_CONSTRAINT(0x11, 0x3), + UNCORE_EVENT_CONSTRAINT(0x12, 0x1), + UNCORE_EVENT_CONSTRAINT(0x23, 0x3), + UNCORE_EVENT_CONSTRAINT(0x24, 0x3), + UNCORE_EVENT_CONSTRAINT(0x25, 0x3), + UNCORE_EVENT_CONSTRAINT(0x26, 0x3), + UNCORE_EVENT_CONSTRAINT(0x32, 0x3), + UNCORE_EVENT_CONSTRAINT(0x33, 0x3), + UNCORE_EVENT_CONSTRAINT(0x34, 0x3), + EVENT_CONSTRAINT_END +}; + +static struct event_constraint snbep_uncore_r3qpi_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x10, 0x3), + UNCORE_EVENT_CONSTRAINT(0x11, 0x3), + UNCORE_EVENT_CONSTRAINT(0x12, 0x3), + UNCORE_EVENT_CONSTRAINT(0x13, 0x1), + UNCORE_EVENT_CONSTRAINT(0x20, 0x3), + UNCORE_EVENT_CONSTRAINT(0x21, 0x3), + UNCORE_EVENT_CONSTRAINT(0x22, 0x3), + UNCORE_EVENT_CONSTRAINT(0x23, 0x3), + UNCORE_EVENT_CONSTRAINT(0x24, 0x3), + UNCORE_EVENT_CONSTRAINT(0x25, 0x3), + UNCORE_EVENT_CONSTRAINT(0x26, 0x3), + UNCORE_EVENT_CONSTRAINT(0x28, 0x3), + UNCORE_EVENT_CONSTRAINT(0x29, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), + UNCORE_EVENT_CONSTRAINT(0x30, 0x3), + UNCORE_EVENT_CONSTRAINT(0x31, 0x3), + UNCORE_EVENT_CONSTRAINT(0x32, 0x3), + UNCORE_EVENT_CONSTRAINT(0x33, 0x3), + UNCORE_EVENT_CONSTRAINT(0x34, 0x3), + UNCORE_EVENT_CONSTRAINT(0x36, 0x3), + 
UNCORE_EVENT_CONSTRAINT(0x37, 0x3), + UNCORE_EVENT_CONSTRAINT(0x38, 0x3), + UNCORE_EVENT_CONSTRAINT(0x39, 0x3), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type snbep_uncore_ubox = { + .name = "ubox", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 44, + .fixed_ctr_bits = 48, + .perf_ctr = SNBEP_U_MSR_PMON_CTR0, + .event_ctl = SNBEP_U_MSR_PMON_CTL0, + .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, + .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, + .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, + .ops = &snbep_uncore_msr_ops, + .format_group = &snbep_uncore_ubox_format_group, +}; + +static struct extra_reg snbep_uncore_cbox_extra_regs[] = { + SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, + SNBEP_CBO_PMON_CTL_TID_EN, 0x1), + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), + SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), + SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), + SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), + SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), + SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), + SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2), + EVENT_EXTRA_END +}; + +static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + int i; + + if (uncore_box_is_fake(box)) + return; + + for (i = 0; i < 5; i++) { + if (reg1->alloc & (0x1 << i)) + atomic_sub(1 << (i * 6), &er->ref); + } + reg1->alloc = 0; +} + +static struct event_constraint * +__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, + u64 (*cbox_filter_mask)(int fields)) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + int i, alloc = 0; + unsigned long flags; + u64 mask; + + if (reg1->idx == EXTRA_REG_NONE) + return NULL; + + raw_spin_lock_irqsave(&er->lock, flags); + for (i = 0; i < 5; i++) { + if (!(reg1->idx & (0x1 << i))) + continue; + if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) + continue; + + mask = cbox_filter_mask(0x1 << i); + if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) || + !((reg1->config ^ er->config) & mask)) { + atomic_add(1 << (i * 6), &er->ref); + er->config &= ~mask; + er->config |= reg1->config & mask; + alloc |= (0x1 << i); + } else { + break; + } + } + raw_spin_unlock_irqrestore(&er->lock, flags); + if (i < 5) + goto fail; + + if (!uncore_box_is_fake(box)) + reg1->alloc |= alloc; + + return NULL; +fail: + for (; i >= 0; i--) { + if (alloc & (0x1 << i)) + atomic_sub(1 << (i * 6), &er->ref); + } + return &uncore_constraint_empty; +} + +static u64 snbep_cbox_filter_mask(int fields) +{ + u64 mask = 0; + + if 
(fields & 0x1) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID; + if (fields & 0x2) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID; + if (fields & 0x4) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE; + if (fields & 0x8) + mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC; + + return mask; +} + +static struct event_constraint * +snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); +} + +static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct extra_reg *er; + int idx = 0; + + for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) { + if (er->event != (event->hw.config & er->config_mask)) + continue; + idx |= er->idx; + } + + if (idx) { + reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + + SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; + reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); + reg1->idx = idx; + } + return 0; +} + +static struct intel_uncore_ops snbep_uncore_cbox_ops = { + SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snbep_cbox_hw_config, + .get_constraint = snbep_cbox_get_constraint, + .put_constraint = snbep_cbox_put_constraint, +}; + +static struct intel_uncore_type snbep_uncore_cbox = { + .name = "cbox", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 44, + .event_ctl = SNBEP_C0_MSR_PMON_CTL0, + .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, + .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, + .msr_offset = SNBEP_CBO_MSR_OFFSET, + .num_shared_regs = 1, + .constraints = snbep_uncore_cbox_constraints, + .ops = &snbep_uncore_cbox_ops, + .format_group = &snbep_uncore_cbox_format_group, +}; + +static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + u64 config = reg1->config; + + if (new_idx > reg1->idx) + config <<= 8 * (new_idx - reg1->idx); + else + config >>= 8 * (reg1->idx - new_idx); + + if (modify) { + hwc->config += new_idx - reg1->idx; + reg1->config = config; + reg1->idx = new_idx; + } + return config; +} + +static struct event_constraint * +snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + unsigned long flags; + int idx = reg1->idx; + u64 mask, config1 = reg1->config; + bool ok = false; + + if (reg1->idx == EXTRA_REG_NONE || + (!uncore_box_is_fake(box) && reg1->alloc)) + return NULL; +again: + mask = 0xffULL << (idx * 8); + raw_spin_lock_irqsave(&er->lock, flags); + if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || + !((config1 ^ er->config) & mask)) { + atomic_add(1 << (idx * 8), &er->ref); + er->config &= ~mask; + er->config |= config1 & mask; + ok = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + if (!ok) { + idx = (idx + 1) % 4; + if (idx != reg1->idx) { + config1 = snbep_pcu_alter_er(event, idx, false); + goto again; + } + return &uncore_constraint_empty; + } + + if (!uncore_box_is_fake(box)) { + if (idx != reg1->idx) + snbep_pcu_alter_er(event, idx, true); + reg1->alloc = 1; + } + return NULL; +} + +static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct intel_uncore_extra_reg *er = &box->shared_regs[0]; + + if 
(uncore_box_is_fake(box) || !reg1->alloc) + return; + + atomic_sub(1 << (reg1->idx * 8), &er->ref); + reg1->alloc = 0; +} + +static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; + + if (ev_sel >= 0xb && ev_sel <= 0xe) { + reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; + reg1->idx = ev_sel - 0xb; + /* each of the four filter bands is 8 bits wide */ + reg1->config = event->attr.config1 & (0xffULL << (reg1->idx * 8)); + } + return 0; +} + +static struct intel_uncore_ops snbep_uncore_pcu_ops = { + SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snbep_pcu_hw_config, + .get_constraint = snbep_pcu_get_constraint, + .put_constraint = snbep_pcu_put_constraint, +}; + +static struct intel_uncore_type snbep_uncore_pcu = { + .name = "pcu", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, + .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, + .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &snbep_uncore_pcu_ops, + .format_group = &snbep_uncore_pcu_format_group, +}; + +static struct intel_uncore_type *snbep_msr_uncores[] = { + &snbep_uncore_ubox, + &snbep_uncore_cbox, + &snbep_uncore_pcu, + NULL, +}; + +void snbep_uncore_cpu_init(void) +{ + if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) + snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + uncore_msr_uncores = snbep_msr_uncores; +} + +enum { + SNBEP_PCI_QPI_PORT0_FILTER, + SNBEP_PCI_QPI_PORT1_FILTER, +}; + +static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { + reg1->idx = 0; + reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; + reg1->config = event->attr.config1; + reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; + reg2->config = event->attr.config2; + } + return 0; +} + +static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; + struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx]; + WARN_ON_ONCE(!filter_pdev); + if (filter_pdev) { + pci_write_config_dword(filter_pdev, reg1->reg, + (u32)reg1->config); + pci_write_config_dword(filter_pdev, reg1->reg + 4, + (u32)(reg1->config >> 32)); + pci_write_config_dword(filter_pdev, reg2->reg, + (u32)reg2->config); + pci_write_config_dword(filter_pdev, reg2->reg + 4, + (u32)(reg2->config >> 32)); + } + } + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static struct intel_uncore_ops snbep_uncore_qpi_ops = { + SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), + .enable_event = snbep_qpi_enable_event, + .hw_config = snbep_qpi_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + +#define SNBEP_UNCORE_PCI_COMMON_INIT() \ + .perf_ctr = SNBEP_PCI_PMON_CTR0, \ + .event_ctl = SNBEP_PCI_PMON_CTL0, \ + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ + .ops = &snbep_uncore_pci_ops, \ +
.format_group = &snbep_uncore_format_group + +static struct intel_uncore_type snbep_uncore_ha = { + .name = "ha", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type snbep_uncore_imc = { + .name = "imc", + .num_counters = 4, + .num_boxes = 4, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, + .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, + .event_descs = snbep_uncore_imc_events, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type snbep_uncore_qpi = { + .name = "qpi", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCI_PMON_CTR0, + .event_ctl = SNBEP_PCI_PMON_CTL0, + .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &snbep_uncore_qpi_ops, + .event_descs = snbep_uncore_qpi_events, + .format_group = &snbep_uncore_qpi_format_group, +}; + + +static struct intel_uncore_type snbep_uncore_r2pcie = { + .name = "r2pcie", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 44, + .constraints = snbep_uncore_r2pcie_constraints, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type snbep_uncore_r3qpi = { + .name = "r3qpi", + .num_counters = 3, + .num_boxes = 2, + .perf_ctr_bits = 44, + .constraints = snbep_uncore_r3qpi_constraints, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +enum { + SNBEP_PCI_UNCORE_HA, + SNBEP_PCI_UNCORE_IMC, + SNBEP_PCI_UNCORE_QPI, + SNBEP_PCI_UNCORE_R2PCIE, + SNBEP_PCI_UNCORE_R3QPI, +}; + +static struct intel_uncore_type *snbep_pci_uncores[] = { + [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha, + [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc, + [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi, + [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie, + [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi, + NULL, +}; + +static const struct pci_device_id snbep_uncore_pci_ids[] = { + { /* Home Agent */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), + }, + { /* MC Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), + }, + { /* MC Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), + }, + { /* MC Channel 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), + }, + { /* MC Channel 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), + }, + { /* QPI Port 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), + }, + { /* QPI Port 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), + }, + { /* R2PCIe */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), + }, + { /* R3QPI Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), + }, + { /* R3QPI Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), + .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), + }, + { /* QPI Port 0 filter */ + 
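+ /* the QPI filter registers sit on separate PCI functions; they are tracked as extra devices, not PMUs */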
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT0_FILTER), + }, + { /* QPI Port 1 filter */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT1_FILTER), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver snbep_uncore_pci_driver = { + .name = "snbep_uncore", + .id_table = snbep_uncore_pci_ids, +}; + +/* + * build pci bus to socket mapping + */ +static int snbep_pci2phy_map_init(int devid) +{ + struct pci_dev *ubox_dev = NULL; + int i, bus, nodeid; + int err = 0; + u32 config = 0; + + while (1) { + /* find the UBOX device */ + ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev); + if (!ubox_dev) + break; + bus = ubox_dev->bus->number; + /* get the Node ID of the local register */ + err = pci_read_config_dword(ubox_dev, 0x40, &config); + if (err) + break; + nodeid = config; + /* get the Node ID mapping */ + err = pci_read_config_dword(ubox_dev, 0x54, &config); + if (err) + break; + /* + * each three-bit field in the Node ID mapping register maps + * to a particular node. + */ + for (i = 0; i < 8; i++) { + if (nodeid == ((config >> (3 * i)) & 0x7)) { + uncore_pcibus_to_physid[bus] = i; + break; + } + } + } + + if (!err) { + /* + * For PCI buses that have no UBOX device, reuse the mapping + * of the closest higher-numbered bus that has one. + */ + i = -1; + for (bus = 255; bus >= 0; bus--) { + if (uncore_pcibus_to_physid[bus] >= 0) + i = uncore_pcibus_to_physid[bus]; + else + uncore_pcibus_to_physid[bus] = i; + } + } + + if (ubox_dev) + pci_dev_put(ubox_dev); + + return err ? pcibios_err_to_errno(err) : 0; +} + +int snbep_uncore_pci_init(void) +{ + int ret = snbep_pci2phy_map_init(0x3ce0); + if (ret) + return ret; + uncore_pci_uncores = snbep_pci_uncores; + uncore_pci_driver = &snbep_uncore_pci_driver; + return 0; +} +/* end of Sandy Bridge-EP uncore support */ + +/* IvyTown uncore support */ +static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + if (msr) + wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT); +} + +static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + + pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); +} + +#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \ + .init_box = ivbep_uncore_msr_init_box, \ + .disable_box = snbep_uncore_msr_disable_box, \ + .enable_box = snbep_uncore_msr_enable_box, \ + .disable_event = snbep_uncore_msr_disable_event, \ + .enable_event = snbep_uncore_msr_enable_event, \ + .read_counter = uncore_msr_read_counter + +static struct intel_uncore_ops ivbep_uncore_msr_ops = { + IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), +}; + +static struct intel_uncore_ops ivbep_uncore_pci_ops = { + .init_box = ivbep_uncore_pci_init_box, + .disable_box = snbep_uncore_pci_disable_box, + .enable_box = snbep_uncore_pci_enable_box, + .disable_event = snbep_uncore_pci_disable_event, + .enable_event = snbep_uncore_pci_enable_event, + .read_counter = snbep_uncore_pci_read_counter, +}; + +#define IVBEP_UNCORE_PCI_COMMON_INIT() \ + .perf_ctr = SNBEP_PCI_PMON_CTR0, \ + .event_ctl = SNBEP_PCI_PMON_CTL0, \ + .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \ + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ + .ops = &ivbep_uncore_pci_ops, \ + .format_group = &ivbep_uncore_format_group + +static struct attribute *ivbep_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, +
&format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute *ivbep_uncore_ubox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh5.attr, + NULL, +}; + +static struct attribute *ivbep_uncore_cbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_tid_en.attr, + &format_attr_thresh8.attr, + &format_attr_filter_tid.attr, + &format_attr_filter_link.attr, + &format_attr_filter_state2.attr, + &format_attr_filter_nid2.attr, + &format_attr_filter_opc2.attr, + NULL, +}; + +static struct attribute *ivbep_uncore_pcu_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_occ_sel.attr, + &format_attr_edge.attr, + &format_attr_thresh5.attr, + &format_attr_occ_invert.attr, + &format_attr_occ_edge.attr, + &format_attr_filter_band0.attr, + &format_attr_filter_band1.attr, + &format_attr_filter_band2.attr, + &format_attr_filter_band3.attr, + NULL, +}; + +static struct attribute *ivbep_uncore_qpi_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_thresh8.attr, + &format_attr_match_rds.attr, + &format_attr_match_rnid30.attr, + &format_attr_match_rnid4.attr, + &format_attr_match_dnid.attr, + &format_attr_match_mc.attr, + &format_attr_match_opc.attr, + &format_attr_match_vnw.attr, + &format_attr_match0.attr, + &format_attr_match1.attr, + &format_attr_mask_rds.attr, + &format_attr_mask_rnid30.attr, + &format_attr_mask_rnid4.attr, + &format_attr_mask_dnid.attr, + &format_attr_mask_mc.attr, + &format_attr_mask_opc.attr, + &format_attr_mask_vnw.attr, + &format_attr_mask0.attr, + &format_attr_mask1.attr, + NULL, +}; + +static struct attribute_group ivbep_uncore_format_group = { + .name = "format", + .attrs = ivbep_uncore_formats_attr, +}; + +static struct attribute_group ivbep_uncore_ubox_format_group = { + .name = "format", + .attrs = ivbep_uncore_ubox_formats_attr, +}; + +static struct attribute_group ivbep_uncore_cbox_format_group = { + .name = "format", + .attrs = ivbep_uncore_cbox_formats_attr, +}; + +static struct attribute_group ivbep_uncore_pcu_format_group = { + .name = "format", + .attrs = ivbep_uncore_pcu_formats_attr, +}; + +static struct attribute_group ivbep_uncore_qpi_format_group = { + .name = "format", + .attrs = ivbep_uncore_qpi_formats_attr, +}; + +static struct intel_uncore_type ivbep_uncore_ubox = { + .name = "ubox", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 44, + .fixed_ctr_bits = 48, + .perf_ctr = SNBEP_U_MSR_PMON_CTR0, + .event_ctl = SNBEP_U_MSR_PMON_CTL0, + .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK, + .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, + .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_ubox_format_group, +}; + +static struct extra_reg ivbep_uncore_cbox_extra_regs[] = { + SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, + SNBEP_CBO_PMON_CTL_TID_EN, 0x1), + SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 
0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), + SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), + EVENT_EXTRA_END +}; + +static u64 ivbep_cbox_filter_mask(int fields) +{ + u64 mask = 0; + + if (fields & 0x1) + mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID; + if (fields & 0x2) + mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK; + if (fields & 0x4) + mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE; + if (fields & 0x8) + mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID; + if (fields & 0x10) + mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC; + + return mask; +} + +static struct event_constraint * +ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask); +} + +static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct extra_reg *er; + int idx = 0; + + for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) { + if (er->event != (event->hw.config & er->config_mask)) + continue; + idx |= er->idx; + } + + if (idx) { + reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + + SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; + reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx); + reg1->idx = idx; + } + return 0; +} + +static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + u64 filter = uncore_shared_reg_config(box, 0); + wrmsrl(reg1->reg, filter & 0xffffffff); + wrmsrl(reg1->reg + 6, filter >> 32); + } + + wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static struct intel_uncore_ops ivbep_uncore_cbox_ops = { + .init_box = ivbep_uncore_msr_init_box, + .disable_box = snbep_uncore_msr_disable_box, + .enable_box = snbep_uncore_msr_enable_box, + .disable_event = snbep_uncore_msr_disable_event, + .enable_event = ivbep_cbox_enable_event, + .read_counter = uncore_msr_read_counter, + .hw_config = ivbep_cbox_hw_config, + .get_constraint = ivbep_cbox_get_constraint, + .put_constraint = snbep_cbox_put_constraint, +}; + +static struct intel_uncore_type ivbep_uncore_cbox = { + .name = "cbox", + .num_counters = 4, + .num_boxes = 15, + .perf_ctr_bits = 44, + .event_ctl = SNBEP_C0_MSR_PMON_CTL0, + .perf_ctr = 
SNBEP_C0_MSR_PMON_CTR0, + .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, + .msr_offset = SNBEP_CBO_MSR_OFFSET, + .num_shared_regs = 1, + .constraints = snbep_uncore_cbox_constraints, + .ops = &ivbep_uncore_cbox_ops, + .format_group = &ivbep_uncore_cbox_format_group, +}; + +static struct intel_uncore_ops ivbep_uncore_pcu_ops = { + IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snbep_pcu_hw_config, + .get_constraint = snbep_pcu_get_constraint, + .put_constraint = snbep_pcu_put_constraint, +}; + +static struct intel_uncore_type ivbep_uncore_pcu = { + .name = "pcu", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, + .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, + .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &ivbep_uncore_pcu_ops, + .format_group = &ivbep_uncore_pcu_format_group, +}; + +static struct intel_uncore_type *ivbep_msr_uncores[] = { + &ivbep_uncore_ubox, + &ivbep_uncore_cbox, + &ivbep_uncore_pcu, + NULL, +}; + +void ivbep_uncore_cpu_init(void) +{ + if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) + ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + uncore_msr_uncores = ivbep_msr_uncores; +} + +static struct intel_uncore_type ivbep_uncore_ha = { + .name = "ha", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + IVBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type ivbep_uncore_imc = { + .name = "imc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, + .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, + .event_descs = snbep_uncore_imc_events, + IVBEP_UNCORE_PCI_COMMON_INIT(), +}; + +/* registers in IRP boxes are not properly aligned */ +static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; +static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; + +static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], + hwc->config | SNBEP_PMON_CTL_EN); +} + +static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config); +} + +static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); + pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); + + return count; +} + +static struct intel_uncore_ops ivbep_uncore_irp_ops = { + .init_box = ivbep_uncore_pci_init_box, + .disable_box = snbep_uncore_pci_disable_box, + .enable_box = snbep_uncore_pci_enable_box, + .disable_event = ivbep_uncore_irp_disable_event, + .enable_event = ivbep_uncore_irp_enable_event, + .read_counter = ivbep_uncore_irp_read_counter, +}; + +static struct intel_uncore_type ivbep_uncore_irp = { + .name = "irp", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_mask = IVBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .ops = &ivbep_uncore_irp_ops, + 
.format_group = &ivbep_uncore_format_group, +}; + +static struct intel_uncore_ops ivbep_uncore_qpi_ops = { + .init_box = ivbep_uncore_pci_init_box, + .disable_box = snbep_uncore_pci_disable_box, + .enable_box = snbep_uncore_pci_enable_box, + .disable_event = snbep_uncore_pci_disable_event, + .enable_event = snbep_qpi_enable_event, + .read_counter = snbep_uncore_pci_read_counter, + .hw_config = snbep_qpi_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + +static struct intel_uncore_type ivbep_uncore_qpi = { + .name = "qpi", + .num_counters = 4, + .num_boxes = 3, + .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCI_PMON_CTR0, + .event_ctl = SNBEP_PCI_PMON_CTL0, + .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &ivbep_uncore_qpi_ops, + .format_group = &ivbep_uncore_qpi_format_group, +}; + +static struct intel_uncore_type ivbep_uncore_r2pcie = { + .name = "r2pcie", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 44, + .constraints = snbep_uncore_r2pcie_constraints, + IVBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type ivbep_uncore_r3qpi = { + .name = "r3qpi", + .num_counters = 3, + .num_boxes = 2, + .perf_ctr_bits = 44, + .constraints = snbep_uncore_r3qpi_constraints, + IVBEP_UNCORE_PCI_COMMON_INIT(), +}; + +enum { + IVBEP_PCI_UNCORE_HA, + IVBEP_PCI_UNCORE_IMC, + IVBEP_PCI_UNCORE_IRP, + IVBEP_PCI_UNCORE_QPI, + IVBEP_PCI_UNCORE_R2PCIE, + IVBEP_PCI_UNCORE_R3QPI, +}; + +static struct intel_uncore_type *ivbep_pci_uncores[] = { + [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha, + [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc, + [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp, + [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi, + [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie, + [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi, + NULL, +}; + +static const struct pci_device_id ivbep_uncore_pci_ids[] = { + { /* Home Agent 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0), + }, + { /* Home Agent 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1), + }, + { /* MC0 Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0), + }, + { /* MC0 Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1), + }, + { /* MC0 Channel 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2), + }, + { /* MC0 Channel 4 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3), + }, + { /* MC1 Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4), + }, + { /* MC1 Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5), + }, + { /* MC1 Channel 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6), + }, + { /* MC1 Channel 4 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7), + }, + { /* IRP */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0), + }, + { /* QPI0 Port 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0), + }, + { /* QPI0 
Port 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1), + }, + { /* QPI1 Port 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2), + }, + { /* R2PCIe */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0), + }, + { /* R3QPI0 Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0), + }, + { /* R3QPI0 Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1), + }, + { /* R3QPI1 Link 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), + .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2), + }, + { /* QPI Port 0 filter */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT0_FILTER), + }, + { /* QPI Port 1 filter */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT1_FILTER), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver ivbep_uncore_pci_driver = { + .name = "ivbep_uncore", + .id_table = ivbep_uncore_pci_ids, +}; + +int ivbep_uncore_pci_init(void) +{ + int ret = snbep_pci2phy_map_init(0x0e1e); + if (ret) + return ret; + uncore_pci_uncores = ivbep_pci_uncores; + uncore_pci_driver = &ivbep_uncore_pci_driver; + return 0; +} +/* end of IvyTown uncore support */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 784304b222b3..98f923b6a0ea 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -8,28 +8,28 @@ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> * * Jump labels provide an interface to generate dynamic branches using - * self-modifying code. Assuming toolchain and architecture support the result - * of a "if (static_key_false(&key))" statement is a unconditional branch (which + * self-modifying code. Assuming toolchain and architecture support, the result + * of a "if (static_key_false(&key))" statement is an unconditional branch (which * defaults to false - and the true block is placed out of line). * * However at runtime we can change the branch target using * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key - * object and for as long as there are references all branches referring to + * object, and for as long as there are references all branches referring to * that particular key will point to the (out of line) true block. * - * Since this relies on modifying code the static_key_slow_{inc,dec}() functions + * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions * must be considered absolute slow paths (machine wide synchronization etc.). - * OTOH, since the affected branches are unconditional their runtime overhead + * OTOH, since the affected branches are unconditional, their runtime overhead * will be absolutely minimal, esp. in the default (off) case where the total * effect is a single NOP of appropriate size. The on case will patch in a jump * to the out-of-line block. * - * When the control is directly exposed to userspace it is prudent to delay the + * When the control is directly exposed to userspace, it is prudent to delay the * decrement to avoid high frequency code modifications which can (and do) * cause significant performance degradation.
Struct static_key_deferred and * static_key_slow_dec_deferred() provide for this. * - * Lacking toolchain and or architecture support, it falls back to a simple + * Lacking toolchain and/or architecture support, jump labels fall back to a simple * conditional branch. * * struct static_key my_key = STATIC_KEY_INIT_TRUE; @@ -43,8 +43,7 @@ * * Not initializing the key (static data is initialized to 0s anyway) is the * same as using STATIC_KEY_INIT_FALSE. - * -*/ + */ #include <linux/types.h> #include <linux/compiler.h> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 707617a8c0f6..893a0d07986f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -52,6 +52,7 @@ struct perf_guest_info_callbacks { #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> +#include <linux/workqueue.h> #include <asm/local.h> struct perf_callchain_entry { @@ -268,6 +269,7 @@ struct pmu { * enum perf_event_active_state - the states of a event */ enum perf_event_active_state { + PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, @@ -507,6 +509,9 @@ struct perf_event_context { int nr_cgroups; /* cgroup evts */ int nr_branch_stack; /* branch_stack evt */ struct rcu_head rcu_head; + + struct delayed_work orphans_remove; + bool orphans_remove_sched; }; /* @@ -604,6 +609,13 @@ struct perf_sample_data { u64 txn; }; +/* default value for data source */ +#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA)) + static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) { @@ -616,7 +628,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->regs_user.regs = NULL; data->stack_user_size = 0; data->weight = 0; - data->data_src.val = 0; + data->data_src.val = PERF_MEM_NA; data->txn = 0; } diff --git a/kernel/events/core.c b/kernel/events/core.c index f9c1ed002dbc..01bd42ed516c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -47,6 +47,8 @@ #include <asm/irq_regs.h> +static struct workqueue_struct *perf_wq; + struct remote_function_call { struct task_struct *p; int (*func)(void *info); @@ -120,6 +122,13 @@ static int cpu_function_call(int cpu, int (*func) (void *info), void *info) return data.ret; } +#define EVENT_OWNER_KERNEL ((void *) -1) + +static bool is_kernel_event(struct perf_event *event) +{ + return event->owner == EVENT_OWNER_KERNEL; +} + #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ PERF_FLAG_FD_OUTPUT |\ PERF_FLAG_PID_CGROUP |\ @@ -1375,6 +1384,45 @@ out: perf_event__header_size(tmp); } +/* + * A user event without an owner task. + */ +static bool is_orphaned_event(struct perf_event *event) +{ + return event && !is_kernel_event(event) && !event->owner; } + +/* + * Event has a parent, but the parent's task has finished and it's + * alive only because children hold a reference.
+ */ +static bool is_orphaned_child(struct perf_event *event) +{ + return is_orphaned_event(event->parent); +} + +static void orphans_remove_work(struct work_struct *work); + +static void schedule_orphans_remove(struct perf_event_context *ctx) +{ + if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) + return; + + if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { + get_ctx(ctx); + ctx->orphans_remove_sched = true; + } +} + +static int __init perf_workqueue_init(void) +{ + perf_wq = create_singlethread_workqueue("perf"); + WARN(!perf_wq, "failed to create perf workqueue\n"); + return perf_wq ? 0 : -1; +} + +core_initcall(perf_workqueue_init); + static inline int event_filter_match(struct perf_event *event) { @@ -1424,6 +1472,9 @@ event_sched_out(struct perf_event *event, if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; + if (is_orphaned_child(event)) + schedule_orphans_remove(ctx); + perf_pmu_enable(event->pmu); } @@ -1726,6 +1777,9 @@ event_sched_in(struct perf_event *event, if (event->attr.exclusive) cpuctx->exclusive = 1; + if (is_orphaned_child(event)) + schedule_orphans_remove(ctx); + out: perf_pmu_enable(event->pmu); @@ -3068,6 +3122,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx) INIT_LIST_HEAD(&ctx->flexible_groups); INIT_LIST_HEAD(&ctx->event_list); atomic_set(&ctx->refcount, 1); + INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); } static struct perf_event_context * @@ -3313,16 +3368,12 @@ static void free_event(struct perf_event *event) } /* - * Called when the last reference to the file is gone. + * Remove user event from the owner task. */ -static void put_event(struct perf_event *event) +static void perf_remove_from_owner(struct perf_event *event) { - struct perf_event_context *ctx = event->ctx; struct task_struct *owner; - if (!atomic_long_dec_and_test(&event->refcount)) - return; - rcu_read_lock(); owner = ACCESS_ONCE(event->owner); /* @@ -3355,6 +3406,20 @@ static void put_event(struct perf_event *event) mutex_unlock(&owner->perf_event_mutex); put_task_struct(owner); } +} + +/* + * Called when the last reference to the file is gone. + */ +static void put_event(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + + if (!atomic_long_dec_and_test(&event->refcount)) + return; + + if (!is_kernel_event(event)) + perf_remove_from_owner(event); WARN_ON_ONCE(ctx->parent_ctx); /* @@ -3389,6 +3454,42 @@ static int perf_release(struct inode *inode, struct file *file) return 0; } +/* + * Remove all orphaned events from the context.
+ */ +static void orphans_remove_work(struct work_struct *work) +{ + struct perf_event_context *ctx; + struct perf_event *event, *tmp; + + ctx = container_of(work, struct perf_event_context, + orphans_remove.work); + + mutex_lock(&ctx->mutex); + list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { + struct perf_event *parent_event = event->parent; + + if (!is_orphaned_child(event)) + continue; + + perf_remove_from_context(event, true); + + mutex_lock(&parent_event->child_mutex); + list_del_init(&event->child_list); + mutex_unlock(&parent_event->child_mutex); + + free_event(event); + put_event(parent_event); + } + + raw_spin_lock_irq(&ctx->lock); + ctx->orphans_remove_sched = false; + raw_spin_unlock_irq(&ctx->lock); + mutex_unlock(&ctx->mutex); + + put_ctx(ctx); +} + u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) { struct perf_event *child; @@ -3500,7 +3601,8 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count) * error state (i.e. because it was pinned but it couldn't be * scheduled on to the CPU at some point). */ - if (event->state == PERF_EVENT_STATE_ERROR) + if ((event->state == PERF_EVENT_STATE_ERROR) || + (event->state == PERF_EVENT_STATE_EXIT)) return 0; if (count < event->read_size) @@ -3527,7 +3629,12 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) { struct perf_event *event = file->private_data; struct ring_buffer *rb; - unsigned int events = POLL_HUP; + unsigned int events = POLLHUP; + + poll_wait(file, &event->waitq, wait); + + if (event->state == PERF_EVENT_STATE_EXIT) + return events; /* * Pin the event->rb by taking event->mmap_mutex; otherwise @@ -3538,9 +3645,6 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) if (rb) events = atomic_xchg(&rb->poll, 0); mutex_unlock(&event->mmap_mutex); - - poll_wait(file, &event->waitq, wait); - return events; } @@ -7387,6 +7491,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err; } + /* Mark owner so we could distinguish it from user events. */ + event->owner = EVENT_OWNER_KERNEL; + account_event(event); ctx = find_get_context(event->pmu, task, cpu); @@ -7507,6 +7614,9 @@ __perf_event_exit_task(struct perf_event *child_event, if (child_event->parent) { sync_child_event(child_event, child); free_event(child_event); + } else { + child_event->state = PERF_EVENT_STATE_EXIT; + perf_event_wakeup(child_event); } } @@ -7710,7 +7820,8 @@ inherit_event(struct perf_event *parent_event, if (IS_ERR(child_event)) return child_event; - if (!atomic_long_inc_not_zero(&parent_event->refcount)) { + if (is_orphaned_event(parent_event) || + !atomic_long_inc_not_zero(&parent_event->refcount)) { free_event(child_event); return NULL; } diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index d2b59af62bc0..d561e0214f52 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -147,7 +147,7 @@ OPTIONS -w:: --column-widths=<width[,width...]>:: Force each column width to the provided list, for large terminal - readability. + readability. 0 means no limit (default behavior). -t:: --field-separator=:: diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 180ae02137a5..28fdee394880 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -193,6 +193,12 @@ Default is to monitor all CPUS. sum of shown entries will be always 100%. 
"absolute" means it retains the original value before and after the filter is applied. +-w:: +--column-widths=<width[,width...]>:: + Force each column width to the provided list, for large terminal + readability. 0 means no limit (default behavior). + + INTERACTIVE PROMPTING KEYS -------------------------- diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 2240974b7745..95e832b1bc3c 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -263,6 +263,7 @@ LIB_H += util/xyarray.h LIB_H += util/header.h LIB_H += util/help.h LIB_H += util/session.h +LIB_H += util/ordered-events.h LIB_H += util/strbuf.h LIB_H += util/strlist.h LIB_H += util/strfilter.h @@ -347,6 +348,7 @@ LIB_OBJS += $(OUTPUT)util/machine.o LIB_OBJS += $(OUTPUT)util/map.o LIB_OBJS += $(OUTPUT)util/pstack.o LIB_OBJS += $(OUTPUT)util/session.o +LIB_OBJS += $(OUTPUT)util/ordered-events.o LIB_OBJS += $(OUTPUT)util/comm.o LIB_OBJS += $(OUTPUT)util/thread.o LIB_OBJS += $(OUTPUT)util/thread_map.o @@ -423,6 +425,7 @@ endif endif LIB_OBJS += $(OUTPUT)tests/mmap-thread-lookup.o LIB_OBJS += $(OUTPUT)tests/thread-mg-share.o +LIB_OBJS += $(OUTPUT)tests/switch-tracking.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h index e9441b9e2a30..1d3f39c3aa56 100644 --- a/tools/perf/arch/arm64/include/perf_regs.h +++ b/tools/perf/arch/arm64/include/perf_regs.h @@ -6,6 +6,8 @@ #include <asm/perf_regs.h> #define PERF_REGS_MASK ((1ULL << PERF_REG_ARM64_MAX) - 1) +#define PERF_REGS_MAX PERF_REG_ARM64_MAX + #define PERF_REG_IP PERF_REG_ARM64_PC #define PERF_REG_SP PERF_REG_ARM64_SP diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c index 42faf369211c..49776f190abf 100644 --- a/tools/perf/arch/common.c +++ b/tools/perf/arch/common.c @@ -12,6 +12,11 @@ const char *const arm_triplets[] = { NULL }; +const char *const arm64_triplets[] = { + "aarch64-linux-android-", + NULL +}; + const char *const powerpc_triplets[] = { "powerpc-unknown-linux-gnu-", "powerpc64-unknown-linux-gnu-", @@ -105,6 +110,8 @@ static const char *normalize_arch(char *arch) return "x86"; if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5)) return "sparc"; + if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64")) + return "arm64"; if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) return "arm"; if (!strncmp(arch, "s390", 4)) @@ -159,6 +166,8 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env, if (!strcmp(arch, "arm")) path_list = arm_triplets; + else if (!strcmp(arch, "arm64")) + path_list = arm64_triplets; else if (!strcmp(arch, "powerpc")) path_list = powerpc_triplets; else if (!strcmp(arch, "sh")) diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index a7c23a4b3778..d73ef8bb08c7 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -15,6 +15,7 @@ #include "util/thread.h" #include "util/callchain.h" +#include "util/debug.h" /* * When saving the callchain on Power, the kernel conservatively saves diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 1ec429fef2be..d4da6929597f 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -36,7 +36,8 @@ struct perf_annotate { struct perf_tool tool; - bool force, use_tui, use_stdio, use_gtk; + struct perf_session *session; + bool use_tui, 
use_stdio, use_gtk; bool full_paths; bool print_line; bool skip_missing; @@ -188,18 +189,9 @@ find_next: static int __cmd_annotate(struct perf_annotate *ann) { int ret; - struct perf_session *session; + struct perf_session *session = ann->session; struct perf_evsel *pos; u64 total_nr_samples; - struct perf_data_file file = { - .path = input_name, - .mode = PERF_DATA_MODE_READ, - .force = ann->force, - }; - - session = perf_session__new(&file, false, &ann->tool); - if (session == NULL) - return -ENOMEM; machines__set_symbol_filter(&session->machines, symbol__annotate_init); @@ -207,22 +199,22 @@ static int __cmd_annotate(struct perf_annotate *ann) ret = perf_session__cpu_bitmap(session, ann->cpu_list, ann->cpu_bitmap); if (ret) - goto out_delete; + goto out; } if (!objdump_path) { ret = perf_session_env__lookup_objdump(&session->header.env); if (ret) - goto out_delete; + goto out; } ret = perf_session__process_events(session, &ann->tool); if (ret) - goto out_delete; + goto out; if (dump_trace) { perf_session__fprintf_nr_events(session, stdout); - goto out_delete; + goto out; } if (verbose > 3) @@ -250,8 +242,8 @@ static int __cmd_annotate(struct perf_annotate *ann) } if (total_nr_samples == 0) { - ui__error("The %s file has no samples!\n", file.path); - goto out_delete; + ui__error("The %s file has no samples!\n", session->file->path); + goto out; } if (use_browser == 2) { @@ -261,24 +253,12 @@ static int __cmd_annotate(struct perf_annotate *ann) "perf_gtk__show_annotations"); if (show_annotations == NULL) { ui__error("GTK browser not found!\n"); - goto out_delete; + goto out; } show_annotations(); } -out_delete: - /* - * Speed up the exit process, for large files this can - * take quite a while. - * - * XXX Enable this when using valgrind or if we ever - * librarize this command. - * - * Also experiment with obstacks to see how much speed - * up we'll get here. 
- * - * perf_session__delete(session); - */ +out: return ret; } @@ -297,10 +277,14 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) .comm = perf_event__process_comm, .exit = perf_event__process_exit, .fork = perf_event__process_fork, - .ordered_samples = true, + .ordered_events = true, .ordering_requires_timestamps = true, }, }; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), @@ -308,7 +292,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "only consider symbols in these dsos"), OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol", "symbol to annotate"), - OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"), + OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, @@ -341,6 +325,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "Show event group information together"), OPT_END() }; + int ret; argc = parse_options(argc, argv, options, annotate_usage, 0); @@ -353,11 +338,16 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) setup_browser(true); + annotate.session = perf_session__new(&file, false, &annotate.tool); + if (annotate.session == NULL) + return -ENOMEM; + symbol_conf.priv_size = sizeof(struct annotation); symbol_conf.try_vmlinux_path = true; - if (symbol__init() < 0) - return -1; + ret = symbol__init(&annotate.session->header.env); + if (ret < 0) + goto out_delete; if (setup_sorting() < 0) usage_with_options(annotate_usage, options); @@ -373,5 +363,20 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) annotate.sym_hist_filter = argv[0]; } - return __cmd_annotate(&annotate); + ret = __cmd_annotate(&annotate); + +out_delete: + /* + * Speed up the exit process, for large files this can + * take quite a while. + * + * XXX Enable this when using valgrind or if we ever + * librarize this command. + * + * Also experiment with obstacks to see how much speed + * up we'll get here. 
+ * + * perf_session__delete(session); + */ + return ret; } diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index 2a2c78f80876..70385756da63 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -246,20 +246,9 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) return true; } -static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp) +static int build_id_cache__fprintf_missing(struct perf_session *session, FILE *fp) { - struct perf_data_file file = { - .path = filename, - .mode = PERF_DATA_MODE_READ, - .force = force, - }; - struct perf_session *session = perf_session__new(&file, false, NULL); - if (session == NULL) - return -1; - perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0); - perf_session__delete(session); - return 0; } @@ -302,6 +291,12 @@ int cmd_buildid_cache(int argc, const char **argv, *missing_filename = NULL, *update_name_list_str = NULL, *kcore_filename; + char sbuf[STRERR_BUFSIZE]; + + struct perf_data_file file = { + .mode = PERF_DATA_MODE_READ, + }; + struct perf_session *session = NULL; const struct option buildid_cache_options[] = { OPT_STRING('a', "add", &add_name_list_str, @@ -326,8 +321,17 @@ int cmd_buildid_cache(int argc, const char **argv, argc = parse_options(argc, argv, buildid_cache_options, buildid_cache_usage, 0); - if (symbol__init() < 0) - return -1; + if (missing_filename) { + file.path = missing_filename; + file.force = force; + + session = perf_session__new(&file, false, NULL); + if (session == NULL) + return -1; + } + + if (symbol__init(session ? &session->header.env : NULL) < 0) + goto out; setup_pager(); @@ -344,7 +348,7 @@ int cmd_buildid_cache(int argc, const char **argv, continue; } pr_warning("Couldn't add %s: %s\n", - pos->s, strerror(errno)); + pos->s, strerror_r(errno, sbuf, sizeof(sbuf))); } strlist__delete(list); @@ -362,7 +366,7 @@ int cmd_buildid_cache(int argc, const char **argv, continue; } pr_warning("Couldn't remove %s: %s\n", - pos->s, strerror(errno)); + pos->s, strerror_r(errno, sbuf, sizeof(sbuf))); } strlist__delete(list); @@ -370,7 +374,7 @@ int cmd_buildid_cache(int argc, const char **argv, } if (missing_filename) - ret = build_id_cache__fprintf_missing(missing_filename, force, stdout); + ret = build_id_cache__fprintf_missing(session, stdout); if (update_name_list_str) { list = strlist__new(true, update_name_list_str); @@ -383,7 +387,7 @@ int cmd_buildid_cache(int argc, const char **argv, continue; } pr_warning("Couldn't update %s: %s\n", - pos->s, strerror(errno)); + pos->s, strerror_r(errno, sbuf, sizeof(sbuf))); } strlist__delete(list); @@ -394,5 +398,9 @@ int cmd_buildid_cache(int argc, const char **argv, build_id_cache__add_kcore(kcore_filename, debugdir, force)) pr_warning("Couldn't add %s\n", kcore_filename); +out: + if (session) + perf_session__delete(session); + return ret; } diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 9a5a035cb426..190d0b6b28cc 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -360,7 +360,7 @@ static struct perf_tool tool = { .exit = perf_event__process_exit, .fork = perf_event__process_fork, .lost = perf_event__process_lost, - .ordered_samples = true, + .ordered_events = true, .ordering_requires_timestamps = true, }; @@ -1143,7 +1143,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) argc = parse_options(argc, argv, options, diff_usage, 0); - if 
(symbol__init() < 0) + if (symbol__init(NULL) < 0) return -1; if (data_init(argc, argv) < 0) diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 0384d930480b..25d20628212e 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -103,6 +103,8 @@ static int check_emacsclient_version(void) static void exec_woman_emacs(const char *path, const char *page) { + char sbuf[STRERR_BUFSIZE]; + if (!check_emacsclient_version()) { /* This works only with emacsclient version >= 22. */ struct strbuf man_page = STRBUF_INIT; @@ -111,16 +113,19 @@ static void exec_woman_emacs(const char *path, const char *page) path = "emacsclient"; strbuf_addf(&man_page, "(woman \"%s\")", page); execlp(path, "emacsclient", "-e", man_page.buf, NULL); - warning("failed to exec '%s': %s", path, strerror(errno)); + warning("failed to exec '%s': %s", path, + strerror_r(errno, sbuf, sizeof(sbuf))); } } static void exec_man_konqueror(const char *path, const char *page) { const char *display = getenv("DISPLAY"); + if (display && *display) { struct strbuf man_page = STRBUF_INIT; const char *filename = "kfmclient"; + char sbuf[STRERR_BUFSIZE]; /* It's simpler to launch konqueror using kfmclient. */ if (path) { @@ -139,24 +144,31 @@ static void exec_man_konqueror(const char *path, const char *page) path = "kfmclient"; strbuf_addf(&man_page, "man:%s(1)", page); execlp(path, filename, "newTab", man_page.buf, NULL); - warning("failed to exec '%s': %s", path, strerror(errno)); + warning("failed to exec '%s': %s", path, + strerror_r(errno, sbuf, sizeof(sbuf))); } } static void exec_man_man(const char *path, const char *page) { + char sbuf[STRERR_BUFSIZE]; + if (!path) path = "man"; execlp(path, "man", page, NULL); - warning("failed to exec '%s': %s", path, strerror(errno)); + warning("failed to exec '%s': %s", path, + strerror_r(errno, sbuf, sizeof(sbuf))); } static void exec_man_cmd(const char *cmd, const char *page) { struct strbuf shell_cmd = STRBUF_INIT; + char sbuf[STRERR_BUFSIZE]; + strbuf_addf(&shell_cmd, "%s %s", cmd, page); execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL); - warning("failed to exec '%s': %s", cmd, strerror(errno)); + warning("failed to exec '%s': %s", cmd, + strerror_r(errno, sbuf, sizeof(sbuf))); } static void add_man_viewer(const char *name) diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 9a02807387d6..3a62b6b3c8fd 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -23,6 +23,7 @@ struct perf_inject { struct perf_tool tool; + struct perf_session *session; bool build_ids; bool sched_stat; const char *input_name; @@ -340,12 +341,8 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel, static int __cmd_inject(struct perf_inject *inject) { - struct perf_session *session; int ret = -EINVAL; - struct perf_data_file file = { - .path = inject->input_name, - .mode = PERF_DATA_MODE_READ, - }; + struct perf_session *session = inject->session; struct perf_data_file *file_out = &inject->output; signal(SIGINT, sig_handler); @@ -357,16 +354,12 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.tracing_data = perf_event__repipe_tracing_data; } - session = perf_session__new(&file, true, &inject->tool); - if (session == NULL) - return -ENOMEM; - if (inject->build_ids) { inject->tool.sample = perf_event__inject_buildid; } else if (inject->sched_stat) { struct perf_evsel *evsel; - inject->tool.ordered_samples = true; + inject->tool.ordered_events = true; evlist__for_each(session->evlist, evsel) { const char 
*name = perf_evsel__name(evsel); @@ -396,8 +389,6 @@ static int __cmd_inject(struct perf_inject *inject) perf_session__write_header(session, session->evlist, file_out->fd, true); } - perf_session__delete(session); - return ret; } @@ -427,6 +418,11 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) .mode = PERF_DATA_MODE_WRITE, }, }; + struct perf_data_file file = { + .mode = PERF_DATA_MODE_READ, + }; + int ret; + const struct option options[] = { OPT_BOOLEAN('b', "build-ids", &inject.build_ids, "Inject build-ids into the output stream"), @@ -461,8 +457,17 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) return -1; } - if (symbol__init() < 0) + file.path = inject.input_name; + inject.session = perf_session__new(&file, true, &inject.tool); + if (inject.session == NULL) + return -ENOMEM; + + if (symbol__init(&inject.session->header.env) < 0) return -1; - return __cmd_inject(&inject); + ret = __cmd_inject(&inject); + + perf_session__delete(inject.session); + + return ret; } diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index bef3376bfaf3..23762187a219 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -256,7 +256,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, static struct perf_tool perf_kmem = { .sample = process_sample_event, .comm = perf_event__process_comm, - .ordered_samples = true, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + .ordered_events = true, }; static double fragmentation(unsigned long n_req, unsigned long n_alloc) @@ -403,10 +405,9 @@ static void sort_result(void) __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort); } -static int __cmd_kmem(void) +static int __cmd_kmem(struct perf_session *session) { int err = -EINVAL; - struct perf_session *session; const struct perf_evsel_str_handler kmem_tracepoints[] = { { "kmem:kmalloc", perf_evsel__process_alloc_event, }, { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, }, @@ -415,34 +416,22 @@ static int __cmd_kmem(void) { "kmem:kfree", perf_evsel__process_free_event, }, { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, }; - struct perf_data_file file = { - .path = input_name, - .mode = PERF_DATA_MODE_READ, - }; - - session = perf_session__new(&file, false, &perf_kmem); - if (session == NULL) - return -ENOMEM; - - if (perf_session__create_kernel_maps(session) < 0) - goto out_delete; if (!perf_session__has_traces(session, "kmem record")) - goto out_delete; + goto out; if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) { pr_err("Initializing perf session tracepoint handlers failed\n"); - return -1; + goto out; } setup_pager(); err = perf_session__process_events(session, &perf_kmem); if (err != 0) - goto out_delete; + goto out; sort_result(); print_result(session); -out_delete: - perf_session__delete(session); +out: return err; } @@ -689,29 +678,46 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) NULL, NULL }; + struct perf_session *session; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; + int ret = -1; + argc = parse_options_subcommand(argc, argv, kmem_options, kmem_subcommands, kmem_usage, 0); if (!argc) usage_with_options(kmem_usage, kmem_options); - symbol__init(); - if (!strncmp(argv[0], "rec", 3)) { + symbol__init(NULL); return __cmd_record(argc, argv); - } else if (!strcmp(argv[0], "stat")) { + } + + session = perf_session__new(&file, 
false, &perf_kmem); + if (session == NULL) + return -ENOMEM; + + symbol__init(&session->header.env); + + if (!strcmp(argv[0], "stat")) { if (cpu__setup_cpunode_map()) - return -1; + goto out_delete; if (list_empty(&caller_sort)) setup_sorting(&caller_sort, default_sort_order); if (list_empty(&alloc_sort)) setup_sorting(&alloc_sort, default_sort_order); - return __cmd_kmem(); + ret = __cmd_kmem(session); } else usage_with_options(kmem_usage, kmem_options); - return 0; +out_delete: + perf_session__delete(session); + + return ret; } diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 43367eb00510..1a4ef9cd9d5f 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -592,8 +592,8 @@ static void print_result(struct perf_kvm_stat *kvm) pr_info("%9s ", "Samples%"); pr_info("%9s ", "Time%"); - pr_info("%10s ", "Min Time"); - pr_info("%10s ", "Max Time"); + pr_info("%11s ", "Min Time"); + pr_info("%11s ", "Max Time"); pr_info("%16s ", "Avg time"); pr_info("\n\n"); @@ -610,8 +610,8 @@ static void print_result(struct perf_kvm_stat *kvm) pr_info("%10llu ", (unsigned long long)ecount); pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100); pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100); - pr_info("%8" PRIu64 "us ", min / 1000); - pr_info("%8" PRIu64 "us ", max / 1000); + pr_info("%9.2fus ", (double)min / 1e3); + pr_info("%9.2fus ", (double)max / 1e3); pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3, kvm_event_rel_stddev(vcpu, event)); pr_info("\n"); @@ -732,7 +732,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx, return -1; } - err = perf_session_queue_event(kvm->session, event, &sample, 0); + err = perf_session_queue_event(kvm->session, event, &kvm->tool, &sample, 0); /* * FIXME: Here we can't consume the event, as perf_session_queue_event will * point to it, and it'll get possibly overwritten by the kernel. 
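The FIXME above is a lifetime problem: perf_session_queue_event() stores a pointer into the mmap ring, so the slot cannot be marked consumed (and reused by the kernel) until the ordered-events queue has been flushed. A minimal sketch of the copy-before-consume alternative; the record layout mirrors struct perf_event_header, but queue_event_copy() and its ownership convention are hypothetical illustrations, not perf's actual API:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct event_header {
		uint32_t type;
		uint16_t misc;
		uint16_t size;	/* total record size, header included */
	};

	/*
	 * Duplicate the record out of the ring so the slot can be consumed
	 * immediately: the queue then owns its own storage, and no borrowed
	 * pointer is left for the kernel to overwrite.
	 */
	static void *queue_event_copy(const void *ring_slot)
	{
		const struct event_header *hdr = ring_slot;
		void *copy = malloc(hdr->size);

		if (copy)
			memcpy(copy, ring_slot, hdr->size);
		return copy;	/* freed by the queue after the flush */
	}

The cost is an extra allocation and copy per record, which the zero-copy queueing above avoids at the price of the deferred consume.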
@@ -785,7 +785,7 @@ static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm) /* flush queue after each round in which we processed events */ if (ntotal) { - kvm->session->ordered_samples.next_flush = flush_time; + kvm->session->ordered_events.next_flush = flush_time; err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session); if (err) { if (kvm->lost_events) @@ -885,15 +885,11 @@ static int fd_set_nonblock(int fd) return 0; } -static -int perf_kvm__handle_stdin(struct termios *tc_now, struct termios *tc_save) +static int perf_kvm__handle_stdin(void) { int c; - tcsetattr(0, TCSANOW, tc_now); c = getc(stdin); - tcsetattr(0, TCSAFLUSH, tc_save); - if (c == 'q') return 1; @@ -904,7 +900,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm) { struct pollfd *pollfds = NULL; int nr_fds, nr_stdin, ret, err = -EINVAL; - struct termios tc, save; + struct termios save; /* live flag must be set first */ kvm->live = true; @@ -919,14 +915,9 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm) goto out; } + set_term_quiet_input(&save); init_kvm_event_record(kvm); - tcgetattr(0, &save); - tc = save; - tc.c_lflag &= ~(ICANON | ECHO); - tc.c_cc[VMIN] = 0; - tc.c_cc[VTIME] = 0; - signal(SIGINT, sig_handler); signal(SIGTERM, sig_handler); @@ -972,7 +963,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm) goto out; if (pollfds[nr_stdin].revents & POLLIN) - done = perf_kvm__handle_stdin(&tc, &save); + done = perf_kvm__handle_stdin(); if (!rc && !done) err = poll(pollfds, nr_fds, 100); @@ -989,6 +980,7 @@ out: if (kvm->timerfd >= 0) close(kvm->timerfd); + tcsetattr(0, TCSAFLUSH, &save); free(pollfds); return err; } @@ -998,6 +990,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm) int err, rc = -1; struct perf_evsel *pos; struct perf_evlist *evlist = kvm->evlist; + char sbuf[STRERR_BUFSIZE]; perf_evlist__config(evlist, &kvm->opts); @@ -1034,12 +1027,14 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm) err = perf_evlist__open(evlist); if (err < 0) { - printf("Couldn't create the events: %s\n", strerror(errno)); + printf("Couldn't create the events: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out; } if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) { - ui__error("Failed to mmap the events: %s\n", strerror(errno)); + ui__error("Failed to mmap the events: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); perf_evlist__close(evlist); goto out; } @@ -1058,7 +1053,7 @@ static int read_events(struct perf_kvm_stat *kvm) struct perf_tool eops = { .sample = process_sample_event, .comm = perf_event__process_comm, - .ordered_samples = true, + .ordered_events = true, }; struct perf_data_file file = { .path = kvm->file_name, @@ -1072,6 +1067,8 @@ static int read_events(struct perf_kvm_stat *kvm) return -EINVAL; } + symbol__init(&kvm->session->header.env); + if (!perf_session__has_traces(kvm->session, "kvm record")) return -EINVAL; @@ -1201,8 +1198,6 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv) NULL }; - symbol__init(); - if (argc) { argc = parse_options(argc, argv, kvm_events_report_options, @@ -1311,7 +1306,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm, kvm->tool.exit = perf_event__process_exit; kvm->tool.fork = perf_event__process_fork; kvm->tool.lost = process_lost_event; - kvm->tool.ordered_samples = true; + kvm->tool.ordered_events = true; perf_tool__fill_defaults(&kvm->tool); /* set defaults */ @@ -1322,7 +1317,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm, 
kvm->opts.target.uid_str = NULL; kvm->opts.target.uid = UINT_MAX; - symbol__init(); + symbol__init(NULL); disable_buildid_cache(); use_browser = 0; diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 6148afc995c6..92790ed7af45 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -852,7 +852,7 @@ static int __cmd_report(bool display_info) struct perf_tool eops = { .sample = process_sample_event, .comm = perf_event__process_comm, - .ordered_samples = true, + .ordered_events = true, }; struct perf_data_file file = { .path = input_name, @@ -865,6 +865,8 @@ static int __cmd_report(bool display_info) return -ENOMEM; } + symbol__init(&session->header.env); + if (!perf_session__has_traces(session, "lock record")) goto out_delete; @@ -974,7 +976,6 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) unsigned int i; int rc = 0; - symbol__init(); for (i = 0; i < LOCKHASH_SIZE; i++) INIT_LIST_HEAD(lockhash_table + i); diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 4a1a6c94a5eb..8b4a87fe3858 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -133,7 +133,7 @@ static int report_raw_events(struct perf_mem *mem) goto out_delete; } - if (symbol__init() < 0) + if (symbol__init(&session->header.env) < 0) return -1; printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n"); @@ -194,7 +194,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) .lost = perf_event__process_lost, .fork = perf_event__process_fork, .build_id = perf_event__process_build_id, - .ordered_samples = true, + .ordered_events = true, }, .input_name = "perf.data", }; diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index c63fa2925075..347729e29a92 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -290,8 +290,11 @@ static void cleanup_params(void) static void pr_err_with_code(const char *msg, int err) { + char sbuf[STRERR_BUFSIZE]; + pr_err("%s", msg); - pr_debug(" Reason: %s (Code: %d)", strerror(-err), err); + pr_debug(" Reason: %s (Code: %d)", + strerror_r(-err, sbuf, sizeof(sbuf)), err); pr_err("\n"); } diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4869050e7194..87e28a4e33ba 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -161,7 +161,7 @@ try_again: if (perf_evlist__apply_filters(evlist)) { error("failed to set filter with %d (%s)\n", errno, - strerror(errno)); + strerror_r(errno, msg, sizeof(msg))); rc = -1; goto out; } @@ -175,7 +175,8 @@ try_again: "(current value: %u)\n", opts->mmap_pages); rc = -errno; } else { - pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno)); + pr_err("failed to mmap with %d (%s)\n", errno, + strerror_r(errno, msg, sizeof(msg))); rc = -errno; } goto out; @@ -480,7 +481,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) } if (forks && workload_exec_errno) { - char msg[512]; + char msg[STRERR_BUFSIZE]; const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg)); pr_err("Workload failed: %s\n", emsg); err = -1; @@ -781,6 +782,7 @@ static const char * const record_usage[] = { */ static struct record record = { .opts = { + .sample_time = true, .mmap_pages = UINT_MAX, .user_freq = UINT_MAX, .user_interval = ULLONG_MAX, @@ -907,7 +909,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) usage_with_options(record_usage, record_options); } - symbol__init(); + symbol__init(NULL); if 
(symbol_conf.kptr_restrict) pr_warning( diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 21d830bafff3..3da59a87ec7c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -58,17 +58,19 @@ struct report { const char *symbol_filter_str; float min_percent; u64 nr_entries; + u64 queue_size; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; static int report__config(const char *var, const char *value, void *cb) { + struct report *rep = cb; + if (!strcmp(var, "report.group")) { symbol_conf.event_group = perf_config_bool(var, value); return 0; } if (!strcmp(var, "report.percent-limit")) { - struct report *rep = cb; rep->min_percent = strtof(value, NULL); return 0; } @@ -76,6 +78,10 @@ static int report__config(const char *var, const char *value, void *cb) symbol_conf.cumulate_callchain = perf_config_bool(var, value); return 0; } + if (!strcmp(var, "report.queue-size")) { + rep->queue_size = perf_config_u64(var, value); + return 0; + } return perf_default_config(var, value, cb); } @@ -578,7 +584,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) .attr = perf_event__process_attr, .tracing_data = perf_event__process_tracing_data, .build_id = perf_event__process_build_id, - .ordered_samples = true, + .ordered_events = true, .ordering_requires_timestamps = true, }, .max_stack = PERF_MAX_STACK_DEPTH, @@ -714,12 +720,17 @@ repeat: if (session == NULL) return -ENOMEM; + if (report.queue_size) { + ordered_events__set_alloc_size(&session->ordered_events, + report.queue_size); + } + report.session = session; has_br_stack = perf_header__has_feat(&session->header, HEADER_BRANCH_STACK); - if (branch_mode == -1 && has_br_stack) { + if ((branch_mode == -1 && has_br_stack) || branch_mode == 1) { sort__mode = SORT_MODE__BRANCH; symbol_conf.cumulate_callchain = false; } @@ -787,7 +798,7 @@ repeat: } } - if (symbol__init() < 0) + if (symbol__init(&session->header.env) < 0) goto error; if (argc) { diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index f83c08c0dd87..9c9287fbf8e9 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -428,6 +428,7 @@ static u64 get_cpu_usage_nsec_parent(void) static int self_open_counters(void) { struct perf_event_attr attr; + char sbuf[STRERR_BUFSIZE]; int fd; memset(&attr, 0, sizeof(attr)); @@ -440,7 +441,8 @@ static int self_open_counters(void) if (fd < 0) pr_err("Error: sys_perf_event_open() syscall returned " - "with %d (%s)\n", fd, strerror(errno)); + "with %d (%s)\n", fd, + strerror_r(errno, sbuf, sizeof(sbuf))); return fd; } @@ -1462,6 +1464,8 @@ static int perf_sched__read_events(struct perf_sched *sched, return -1; } + symbol__init(&session->header.env); + if (perf_session__set_tracepoints_handlers(session, handlers)) goto out_delete; @@ -1662,7 +1666,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) .comm = perf_event__process_comm, .lost = perf_event__process_lost, .fork = perf_sched__process_fork_event, - .ordered_samples = true, + .ordered_events = true, }, .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), .sort_list = LIST_HEAD_INIT(sched.sort_list), @@ -1747,7 +1751,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) if (!strcmp(argv[0], "script")) return cmd_script(argc, argv, prefix); - symbol__init(); if (!strncmp(argv[0], "rec", 3)) { return __cmd_record(argc, argv); } else if (!strncmp(argv[0], "lat", 3)) { diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 
f57035b89c15..02dce9295e2c 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -184,10 +184,6 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP)) return -EINVAL; - - if (!no_callchain && - !(attr->sample_type & PERF_SAMPLE_CALLCHAIN)) - symbol_conf.use_callchain = false; } if (PRINT_FIELD(ADDR) && @@ -290,6 +286,19 @@ static int perf_session__check_output_opt(struct perf_session *session) set_print_ip_opts(&evsel->attr); } + if (!no_callchain) { + bool use_callchain = false; + + evlist__for_each(session->evlist, evsel) { + if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { + use_callchain = true; + break; + } + } + if (!use_callchain) + symbol_conf.use_callchain = false; + } + /* * set default for tracepoints to print symbols only * if callchains are present @@ -476,6 +485,11 @@ static int default_start_script(const char *script __maybe_unused, return 0; } +static int default_flush_script(void) +{ + return 0; +} + static int default_stop_script(void) { return 0; @@ -489,6 +503,7 @@ static int default_generate_script(struct pevent *pevent __maybe_unused, static struct scripting_ops default_scripting_ops = { .start_script = default_start_script, + .flush_script = default_flush_script, .stop_script = default_stop_script, .process_event = process_event, .generate_script = default_generate_script, @@ -504,6 +519,11 @@ static void setup_scripting(void) scripting_ops = &default_scripting_ops; } +static int flush_scripting(void) +{ + return scripting_ops->flush_script(); +} + static int cleanup_scripting(void) { pr_debug("\nperf script stopped\n"); @@ -1471,12 +1491,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) bool show_full_info = false; bool header = false; bool header_only = false; + bool script_started = false; char *rec_script_path = NULL; char *rep_script_path = NULL; struct perf_session *session; char *script_path = NULL; const char **__argv; - int i, j, err; + int i, j, err = 0; struct perf_script script = { .tool = { .sample = process_sample_event, @@ -1488,7 +1509,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) .attr = process_attr, .tracing_data = perf_event__process_tracing_data, .build_id = perf_event__process_build_id, - .ordered_samples = true, + .ordered_events = true, .ordering_requires_timestamps = true, }, }; @@ -1718,8 +1739,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) exit(-1); } - if (symbol__init() < 0) - return -1; if (!script_name) setup_pager(); @@ -1730,14 +1749,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) if (header || header_only) { perf_session__fprintf_info(session, stdout, show_full_info); if (header_only) - return 0; + goto out_delete; } + if (symbol__init(&session->header.env) < 0) + goto out_delete; + script.session = session; if (cpu_list) { - if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) - return -1; + err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); + if (err < 0) + goto out_delete; } if (!no_callchain) @@ -1752,53 +1775,62 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) if (output_set_by_user()) { fprintf(stderr, "custom fields not supported for generated scripts"); - return -1; + err = -EINVAL; + goto out_delete; } input = open(file.path, O_RDONLY); /* input_name */ if (input < 0) { + err = -errno; perror("failed to open file"); - 
return -1; + goto out_delete; } err = fstat(input, &perf_stat); if (err < 0) { perror("failed to stat file"); - return -1; + goto out_delete; } if (!perf_stat.st_size) { fprintf(stderr, "zero-sized file, nothing to do!\n"); - return 0; + goto out_delete; } scripting_ops = script_spec__lookup(generate_script_lang); if (!scripting_ops) { fprintf(stderr, "invalid language specifier"); - return -1; + err = -ENOENT; + goto out_delete; } err = scripting_ops->generate_script(session->tevent.pevent, "perf-script"); - goto out; + goto out_delete; } if (script_name) { err = scripting_ops->start_script(script_name, argc, argv); if (err) - goto out; + goto out_delete; pr_debug("perf script started with script %s\n\n", script_name); + script_started = true; } err = perf_session__check_output_opt(session); if (err < 0) - goto out; + goto out_delete; err = __cmd_script(&script); + flush_scripting(); + +out_delete: perf_session__delete(session); - cleanup_scripting(); + + if (script_started) + cleanup_scripting(); out: return err; } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 3e80aa10cfd8..5fe0edb1de5d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -593,7 +593,7 @@ static int __run_perf_stat(int argc, const char **argv) if (perf_evlist__apply_filters(evsel_list)) { error("failed to set filter with %d (%s)\n", errno, - strerror(errno)); + strerror_r(errno, msg, sizeof(msg))); return -1; } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 2f1a5220c090..48eea6cd2f5b 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1607,6 +1607,8 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name) if (session == NULL) return -ENOMEM; + symbol__init(&session->header.env); + (void)perf_header__process_sections(&session->header, perf_data_file__fd(session->file), tchart, @@ -1920,7 +1922,7 @@ int cmd_timechart(int argc, const char **argv, .fork = process_fork_event, .exit = process_exit_event, .sample = process_sample_event, - .ordered_samples = true, + .ordered_events = true, }, .proc_num = 15, .min_time = 1000000, @@ -1982,8 +1984,6 @@ int cmd_timechart(int argc, const char **argv, return -1; } - symbol__init(); - if (argc && !strncmp(argv[0], "rec", 3)) { argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 377971dc89a3..9848e270b92c 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -276,11 +276,17 @@ static void perf_top__print_sym_table(struct perf_top *top) return; } + if (top->zero) { + hists__delete_entries(&top->sym_evsel->hists); + } else { + hists__decay_entries(&top->sym_evsel->hists, + top->hide_user_symbols, + top->hide_kernel_symbols); + } + hists__collapse_resort(&top->sym_evsel->hists, NULL); hists__output_resort(&top->sym_evsel->hists); - hists__decay_entries(&top->sym_evsel->hists, - top->hide_user_symbols, - top->hide_kernel_symbols); + hists__output_recalc_col_len(&top->sym_evsel->hists, top->print_entries - printed); putchar('\n'); @@ -427,18 +433,13 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c) if (!perf_top__key_mapped(top, c)) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; - struct termios tc, save; + struct termios save; perf_top__print_mapped_keys(top); fprintf(stdout, "\nEnter selection, or unmapped key to continue: "); fflush(stdout); - tcgetattr(0, &save); - tc = save; - 
tc.c_lflag &= ~(ICANON | ECHO); - tc.c_cc[VMIN] = 0; - tc.c_cc[VTIME] = 0; - tcsetattr(0, TCSANOW, &tc); + set_term_quiet_input(&save); poll(&stdin_poll, 1, -1); c = getc(stdin); @@ -542,11 +543,16 @@ static void perf_top__sort_new_samples(void *arg) if (t->evlist->selected != NULL) t->sym_evsel = t->evlist->selected; + if (t->zero) { + hists__delete_entries(&t->sym_evsel->hists); + } else { + hists__decay_entries(&t->sym_evsel->hists, + t->hide_user_symbols, + t->hide_kernel_symbols); + } + hists__collapse_resort(&t->sym_evsel->hists, NULL); hists__output_resort(&t->sym_evsel->hists); - hists__decay_entries(&t->sym_evsel->hists, - t->hide_user_symbols, - t->hide_kernel_symbols); } static void *display_thread_tui(void *arg) @@ -577,23 +583,32 @@ static void *display_thread_tui(void *arg) return NULL; } +static void display_sig(int sig __maybe_unused) +{ + done = 1; +} + +static void display_setup_sig(void) +{ + signal(SIGSEGV, display_sig); + signal(SIGFPE, display_sig); + signal(SIGINT, display_sig); + signal(SIGQUIT, display_sig); + signal(SIGTERM, display_sig); +} + static void *display_thread(void *arg) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; - struct termios tc, save; + struct termios save; struct perf_top *top = arg; int delay_msecs, c; - tcgetattr(0, &save); - tc = save; - tc.c_lflag &= ~(ICANON | ECHO); - tc.c_cc[VMIN] = 0; - tc.c_cc[VTIME] = 0; - + display_setup_sig(); pthread__unblock_sigwinch(); repeat: delay_msecs = top->delay_secs * 1000; - tcsetattr(0, TCSANOW, &tc); + set_term_quiet_input(&save); /* trash return*/ getc(stdin); @@ -620,13 +635,16 @@ repeat: } } + tcsetattr(0, TCSAFLUSH, &save); return NULL; } -static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) +static int symbol_filter(struct map *map, struct symbol *sym) { const char *name = sym->name; + if (!map->dso->kernel) + return 0; /* * ppc64 uses function descriptors and appends a '.' to the * start of every instruction address. Remove it. 
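[The builtin-top hunks above replace two copies of open-coded termios setup with a set_term_quiet_input() helper. A minimal sketch of such a helper, assuming it simply factors out the removed lines; the real helper lives elsewhere in the perf tree and may differ:

	#include <termios.h>

	/* Sketch only: put the terminal in unbuffered, no-echo mode and
	 * save the old settings so the caller can restore them later with
	 * tcsetattr(0, TCSAFLUSH, old), as display_thread() above does. */
	static void set_term_quiet_input(struct termios *old)
	{
		struct termios tc;

		tcgetattr(0, old);
		tc = *old;
		tc.c_lflag &= ~(ICANON | ECHO);	/* no line buffering, no echo */
		tc.c_cc[VMIN] = 0;		/* read() may return no data... */
		tc.c_cc[VTIME] = 0;		/* ...without blocking */
		tcsetattr(0, TCSANOW, &tc);
	}
]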
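[A recurring change throughout these tools/perf hunks is replacing strerror(errno), which may use shared static storage, with strerror_r() into a local STRERR_BUFSIZE buffer; that matters once threads such as the display thread above are in play. A standalone illustration of the pattern, assuming glibc's GNU strerror_r() (which the later trace__sys_exit() hunk also relies on), whose return value, not the buffer, must be printed:

	#define _GNU_SOURCE		/* glibc: GNU strerror_r() returning char * */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char sbuf[128];		/* stand-in for perf's STRERR_BUFSIZE */

		errno = ENOENT;
		/* GNU strerror_r() may return a pointer to a static message
		 * instead of filling sbuf, so always use the return value: */
		fprintf(stderr, "error: %s\n",
			strerror_r(errno, sbuf, sizeof(sbuf)));
		return 0;
	}
]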
@@ -876,7 +894,7 @@ try_again: if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { ui__error("Failed to mmap with %d (%s)\n", - errno, strerror(errno)); + errno, strerror_r(errno, msg, sizeof(msg))); goto out_err; } @@ -963,7 +981,7 @@ static int __cmd_top(struct perf_top *top) param.sched_priority = top->realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { ui__error("Could not set realtime priority.\n"); - goto out_delete; + goto out_join; } } @@ -977,6 +995,8 @@ static int __cmd_top(struct perf_top *top) } ret = 0; +out_join: + pthread_join(thread, NULL); out_delete: perf_session__delete(top->session); top->session = NULL; @@ -1131,6 +1151,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "Don't show entries under that percent", parse_percent_limit), OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", "How to display percentage of filtered entries", parse_filter_percentage), + OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, + "width[,width...]", + "don't try to adjust column width, use these fixed values"), OPT_END() }; const char * const top_usage[] = { @@ -1217,7 +1240,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) symbol_conf.priv_size = sizeof(struct annotation); symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); - if (symbol__init() < 0) + if (symbol__init(NULL) < 0) return -1; sort__setup_elide(stdout); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index a6c375224f46..a9e96ff49c7f 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -402,6 +402,31 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags +static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, flags = arg->val; + +#define P_MREMAP_FLAG(n) \ + if (flags & MREMAP_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ + flags &= ~MREMAP_##n; \ + } + + P_MREMAP_FLAG(MAYMOVE); +#ifdef MREMAP_FIXED + P_MREMAP_FLAG(FIXED); +#endif +#undef P_MREMAP_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); + + return printed; +} + +#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags + static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size, struct syscall_arg *arg) { @@ -1004,6 +1029,7 @@ static struct syscall_fmt { [2] = SCA_MMAP_PROT, /* prot */ }, }, { .name = "mremap", .hexret = true, .arg_scnprintf = { [0] = SCA_HEX, /* addr */ + [3] = SCA_MREMAP_FLAGS, /* flags */ [4] = SCA_HEX, /* new_addr */ }, }, { .name = "munlock", .errmsg = true, .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, @@ -1385,7 +1411,7 @@ static int trace__tool_process(struct perf_tool *tool, static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist) { - int err = symbol__init(); + int err = symbol__init(NULL); if (err) return err; @@ -1724,7 +1750,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, signed_print: fprintf(trace->output, ") = %d", ret); } else if (ret < 0 && sc->fmt->errmsg) { - char bf[256]; + char bf[STRERR_BUFSIZE]; const char *emsg = strerror_r(-ret, bf, sizeof(bf)), *e = audit_errno_to_name(-ret); @@ -2018,6 +2044,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv) int err = -1, i; unsigned long before; const bool forks = argc > 0; + char sbuf[STRERR_BUFSIZE]; trace->live = true; @@ -2079,7 +2106,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv) err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false); if (err < 0) { - fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno)); + fprintf(trace->output, "Couldn't mmap the events: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } @@ -2209,19 +2237,19 @@ static int trace__replay(struct trace *trace) trace->tool.tracing_data = perf_event__process_tracing_data; trace->tool.build_id = perf_event__process_build_id; - trace->tool.ordered_samples = true; + trace->tool.ordered_events = true; trace->tool.ordering_requires_timestamps = true; /* add tid to output */ trace->multiple_threads = true; - if (symbol__init() < 0) - return -1; - session = perf_session__new(&file, false, &trace->tool); if (session == NULL) return -ENOMEM; + if (symbol__init(&session->header.env) < 0) + goto out; + trace->host = &session->machines.host; err = perf_session__set_tracepoints_handlers(session, handlers); diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 1f67aa02d240..75d4c237b03d 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -120,6 +120,29 @@ ifdef PARSER_DEBUG CFLAGS += -DPARSER_DEBUG endif +ifndef NO_LIBPYTHON + # Try different combinations to accommodate systems that only have + # python[2][-config] in weird combinations but always preferring + # python2 and python2-config as per pep-0394. If we catch a + # python[-config] in version 3, the version check will kill it. 
+ PYTHON2 := $(if $(call get-executable,python2),python2,python) + override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2)) + PYTHON2_CONFIG := \ + $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config) + override PYTHON_CONFIG := \ + $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG)) + + PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) + + PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) + + FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) + FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) + FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) + FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) +endif + CFLAGS += -fno-omit-frame-pointer CFLAGS += -ggdb3 CFLAGS += -funwind-tables @@ -482,21 +505,14 @@ define disable-python_code NO_LIBPYTHON := 1 endef -override PYTHON := \ - $(call get-executable-or-default,PYTHON,python) - -ifndef PYTHON - $(call disable-python,python interpreter) +ifdef NO_LIBPYTHON + $(call disable-python) else - PYTHON_WORD := $(call shell-wordify,$(PYTHON)) - - ifdef NO_LIBPYTHON - $(call disable-python) + ifndef PYTHON + $(call disable-python,python interpreter) else - - override PYTHON_CONFIG := \ - $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) + PYTHON_WORD := $(call shell-wordify,$(PYTHON)) ifndef PYTHON_CONFIG $(call disable-python,python-config tool) diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile index 6088f8d8a434..72ab2984718e 100644 --- a/tools/perf/config/feature-checks/Makefile +++ b/tools/perf/config/feature-checks/Makefile @@ -101,25 +101,11 @@ FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) test-libperl.bin: $(BUILD) $(FLAGS_PERL_EMBED) -override PYTHON := python -override PYTHON_CONFIG := python-config - -escape-for-shell-sq = $(subst ','\'',$(1)) -shell-sq = '$(escape-for-shell-sq)' - -PYTHON_CONFIG_SQ = $(call shell-sq,$(PYTHON_CONFIG)) - -PYTHON_EMBED_LDOPTS = $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) -PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) -PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -PYTHON_EMBED_CCOPTS = $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) -FLAGS_PYTHON_EMBED = $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) - test-libpython.bin: - $(BUILD) $(FLAGS_PYTHON_EMBED) + $(BUILD) test-libpython-version.bin: - $(BUILD) $(FLAGS_PYTHON_EMBED) + $(BUILD) test-libbfd.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 2282d41879a2..452a8474d29d 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -313,6 +313,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) int status; struct stat st; const char *prefix; + char sbuf[STRERR_BUFSIZE]; prefix = NULL; if (p->option & RUN_SETUP) @@ -343,7 +344,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) status = 1; /* Check for ENOSPC and EIO errors.. 
*/ if (fflush(stdout)) { - fprintf(stderr, "write failure on standard output: %s", strerror(errno)); + fprintf(stderr, "write failure on standard output: %s", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out; } if (ferror(stdout)) { @@ -351,7 +353,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) goto out; } if (fclose(stdout)) { - fprintf(stderr, "close failed on standard output: %s", strerror(errno)); + fprintf(stderr, "close failed on standard output: %s", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out; } status = 0; @@ -466,6 +469,7 @@ void pthread__unblock_sigwinch(void) int main(int argc, const char **argv) { const char *cmd; + char sbuf[STRERR_BUFSIZE]; /* The page_size is placed in util object. */ page_size = sysconf(_SC_PAGE_SIZE); @@ -561,7 +565,7 @@ int main(int argc, const char **argv) } fprintf(stderr, "Failed to run command '%s': %s\n", - cmd, strerror(errno)); + cmd, strerror_r(errno, sbuf, sizeof(sbuf))); out: return 1; } diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 6f8b01bc6033..6a4145e5ad2c 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -154,6 +154,10 @@ static struct test { .func = test__hists_cumulate, }, { + .desc = "Test tracking with sched_switch", + .func = test__switch_tracking, + }, + { .func = NULL, }, }; @@ -185,9 +189,11 @@ static bool perf_test__matches(int curr, int argc, const char *argv[]) static int run_test(struct test *test) { int status, err = -1, child = fork(); + char sbuf[STRERR_BUFSIZE]; if (child < 0) { - pr_err("failed to fork test: %s\n", strerror(errno)); + pr_err("failed to fork test: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); return -1; } @@ -297,7 +303,7 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) symbol_conf.sort_by_name = true; symbol_conf.try_vmlinux_path = true; - if (symbol__init() < 0) + if (symbol__init(NULL) < 0) return -1; if (skip != NULL) diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 142263492f6f..9b9622a33932 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -31,6 +31,7 @@ int test__basic_mmap(void) unsigned int nr_events[nsyscalls], expected_nr_events[nsyscalls], i, j; struct perf_evsel *evsels[nsyscalls], *evsel; + char sbuf[STRERR_BUFSIZE]; threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { @@ -49,7 +50,7 @@ int test__basic_mmap(void) sched_setaffinity(0, sizeof(cpu_set), &cpu_set); if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpus->map[0], strerror(errno)); + cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf))); goto out_free_cpus; } @@ -79,7 +80,7 @@ int test__basic_mmap(void) if (perf_evsel__open(evsels[i], cpus, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } @@ -89,7 +90,7 @@ int test__basic_mmap(void) if (perf_evlist__mmap(evlist, 128, true) < 0) { pr_debug("failed to mmap events: %d (%s)\n", errno, - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c index 5fecdbd2f5f7..8fa82d1700c7 100644 --- a/tools/perf/tests/open-syscall-all-cpus.c +++ b/tools/perf/tests/open-syscall-all-cpus.c @@ -12,6 +12,7 @@ int 
test__open_syscall_event_on_all_cpus(void) unsigned int nr_open_calls = 111, i; cpu_set_t cpu_set; struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); + char sbuf[STRERR_BUFSIZE]; if (threads == NULL) { pr_debug("thread_map__new\n"); @@ -35,7 +36,7 @@ int test__open_syscall_event_on_all_cpus(void) if (perf_evsel__open(evsel, cpus, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_evsel_delete; } @@ -56,7 +57,7 @@ int test__open_syscall_event_on_all_cpus(void) if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { pr_debug("sched_setaffinity() failed on CPU %d: %s ", cpus->map[cpu], - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_close_fd; } for (i = 0; i < ncalls; ++i) { diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c index 0785b64ffd6c..922bdb627950 100644 --- a/tools/perf/tests/open-syscall-tp-fields.c +++ b/tools/perf/tests/open-syscall-tp-fields.c @@ -22,6 +22,7 @@ int test__syscall_open_tp_fields(void) struct perf_evlist *evlist = perf_evlist__new(); struct perf_evsel *evsel; int err = -1, i, nr_events = 0, nr_polls = 0; + char sbuf[STRERR_BUFSIZE]; if (evlist == NULL) { pr_debug("%s: perf_evlist__new\n", __func__); @@ -48,13 +49,15 @@ int test__syscall_open_tp_fields(void) err = perf_evlist__open(evlist); if (err < 0) { - pr_debug("perf_evlist__open: %s\n", strerror(errno)); + pr_debug("perf_evlist__open: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } err = perf_evlist__mmap(evlist, UINT_MAX, false); if (err < 0) { - pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); + pr_debug("perf_evlist__mmap: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c index c1dc7d25f38c..a33b2daae40f 100644 --- a/tools/perf/tests/open-syscall.c +++ b/tools/perf/tests/open-syscall.c @@ -9,6 +9,7 @@ int test__open_syscall_event(void) struct perf_evsel *evsel; unsigned int nr_open_calls = 111, i; struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); + char sbuf[STRERR_BUFSIZE]; if (threads == NULL) { pr_debug("thread_map__new\n"); @@ -24,7 +25,7 @@ int test__open_syscall_event(void) if (perf_evsel__open_per_thread(evsel, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_evsel_delete; } diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index aca1a83dd13a..2ce753c1db63 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -59,6 +59,7 @@ int test__PERF_RECORD(void) int err = -1, errs = 0, i, wakeups = 0; u32 cpu; int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; + char sbuf[STRERR_BUFSIZE]; if (evlist == NULL || argv == NULL) { pr_debug("Not enough memory to create evlist\n"); @@ -100,7 +101,8 @@ int test__PERF_RECORD(void) err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); if (err < 0) { - pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); + pr_debug("sched__get_first_possible_cpu: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } @@ -110,7 +112,8 @@ int test__PERF_RECORD(void) * So that we can check perf_sample.cpu on all the samples. 
*/ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { - pr_debug("sched_setaffinity: %s\n", strerror(errno)); + pr_debug("sched_setaffinity: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } @@ -120,7 +123,8 @@ int test__PERF_RECORD(void) */ err = perf_evlist__open(evlist); if (err < 0) { - pr_debug("perf_evlist__open: %s\n", strerror(errno)); + pr_debug("perf_evlist__open: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } @@ -131,7 +135,8 @@ int test__PERF_RECORD(void) */ err = perf_evlist__mmap(evlist, opts.mmap_pages, false); if (err < 0) { - pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); + pr_debug("perf_evlist__mmap: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/tests/rdpmc.c index c04d1f268576..d31f2c4d9f64 100644 --- a/tools/perf/tests/rdpmc.c +++ b/tools/perf/tests/rdpmc.c @@ -100,6 +100,7 @@ static int __test__rdpmc(void) }; u64 delta_sum = 0; struct sigaction sa; + char sbuf[STRERR_BUFSIZE]; sigfillset(&sa.sa_mask); sa.sa_sigaction = segfault_handler; @@ -109,14 +110,15 @@ static int __test__rdpmc(void) perf_event_open_cloexec_flag()); if (fd < 0) { pr_err("Error: sys_perf_event_open() syscall returned " - "with %d (%s)\n", fd, strerror(errno)); + "with %d (%s)\n", fd, + strerror_r(errno, sbuf, sizeof(sbuf))); return -1; } addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); if (addr == (void *)(-1)) { pr_err("Error: mmap() syscall returned with (%s)\n", - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_close; } diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c index 983d6b8562a8..1aa21c90731b 100644 --- a/tools/perf/tests/sw-clock.c +++ b/tools/perf/tests/sw-clock.c @@ -22,6 +22,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) volatile int tmp = 0; u64 total_periods = 0; int nr_samples = 0; + char sbuf[STRERR_BUFSIZE]; union perf_event *event; struct perf_evsel *evsel; struct perf_evlist *evlist; @@ -62,14 +63,15 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) err = -errno; pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n", - strerror(errno), knob, (u64)attr.sample_freq); + strerror_r(errno, sbuf, sizeof(sbuf)), + knob, (u64)attr.sample_freq); goto out_delete_evlist; } err = perf_evlist__mmap(evlist, 128, true); if (err < 0) { pr_debug("failed to mmap event: %d (%s)\n", errno, - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c new file mode 100644 index 000000000000..cc68648c7c55 --- /dev/null +++ b/tools/perf/tests/switch-tracking.c @@ -0,0 +1,572 @@ +#include <sys/time.h> +#include <sys/prctl.h> +#include <time.h> +#include <stdlib.h> + +#include "parse-events.h" +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "cpumap.h" +#include "tests.h" + +static int spin_sleep(void) +{ + struct timeval start, now, diff, maxtime; + struct timespec ts; + int err, i; + + maxtime.tv_sec = 0; + maxtime.tv_usec = 50000; + + err = gettimeofday(&start, NULL); + if (err) + return err; + + /* Spin for 50ms */ + while (1) { + for (i = 0; i < 1000; i++) + barrier(); + + err = gettimeofday(&now, NULL); + if (err) + return err; + + timersub(&now, &start, &diff); + if (timercmp(&diff, &maxtime, > /* For checkpatch */)) + break; + } + + ts.tv_nsec = 50 * 1000 * 1000; + ts.tv_sec = 
0; + + /* Sleep for 50ms */ + err = nanosleep(&ts, NULL); + if (err == EINTR) + err = 0; + + return err; +} + +struct switch_tracking { + struct perf_evsel *switch_evsel; + struct perf_evsel *cycles_evsel; + pid_t *tids; + int nr_tids; + int comm_seen[4]; + int cycles_before_comm_1; + int cycles_between_comm_2_and_comm_3; + int cycles_after_comm_4; +}; + +static int check_comm(struct switch_tracking *switch_tracking, + union perf_event *event, const char *comm, int nr) +{ + if (event->header.type == PERF_RECORD_COMM && + (pid_t)event->comm.pid == getpid() && + (pid_t)event->comm.tid == getpid() && + strcmp(event->comm.comm, comm) == 0) { + if (switch_tracking->comm_seen[nr]) { + pr_debug("Duplicate comm event\n"); + return -1; + } + switch_tracking->comm_seen[nr] = 1; + pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr); + return 1; + } + return 0; +} + +static int check_cpu(struct switch_tracking *switch_tracking, int cpu) +{ + int i, nr = cpu + 1; + + if (cpu < 0) + return -1; + + if (!switch_tracking->tids) { + switch_tracking->tids = calloc(nr, sizeof(pid_t)); + if (!switch_tracking->tids) + return -1; + for (i = 0; i < nr; i++) + switch_tracking->tids[i] = -1; + switch_tracking->nr_tids = nr; + return 0; + } + + if (cpu >= switch_tracking->nr_tids) { + void *addr; + + addr = realloc(switch_tracking->tids, nr * sizeof(pid_t)); + if (!addr) + return -1; + switch_tracking->tids = addr; + for (i = switch_tracking->nr_tids; i < nr; i++) + switch_tracking->tids[i] = -1; + switch_tracking->nr_tids = nr; + return 0; + } + + return 0; +} + +static int process_sample_event(struct perf_evlist *evlist, + union perf_event *event, + struct switch_tracking *switch_tracking) +{ + struct perf_sample sample; + struct perf_evsel *evsel; + pid_t next_tid, prev_tid; + int cpu, err; + + if (perf_evlist__parse_sample(evlist, event, &sample)) { + pr_debug("perf_evlist__parse_sample failed\n"); + return -1; + } + + evsel = perf_evlist__id2evsel(evlist, sample.id); + if (evsel == switch_tracking->switch_evsel) { + next_tid = perf_evsel__intval(evsel, &sample, "next_pid"); + prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid"); + cpu = sample.cpu; + pr_debug3("sched_switch: cpu: %d prev_tid %d next_tid %d\n", + cpu, prev_tid, next_tid); + err = check_cpu(switch_tracking, cpu); + if (err) + return err; + /* + * Check for no missing sched_switch events i.e. that the + * evsel->system_wide flag has worked. 
+ */ + if (switch_tracking->tids[cpu] != -1 && + switch_tracking->tids[cpu] != prev_tid) { + pr_debug("Missing sched_switch events\n"); + return -1; + } + switch_tracking->tids[cpu] = next_tid; + } + + if (evsel == switch_tracking->cycles_evsel) { + pr_debug3("cycles event\n"); + if (!switch_tracking->comm_seen[0]) + switch_tracking->cycles_before_comm_1 = 1; + if (switch_tracking->comm_seen[1] && + !switch_tracking->comm_seen[2]) + switch_tracking->cycles_between_comm_2_and_comm_3 = 1; + if (switch_tracking->comm_seen[3]) + switch_tracking->cycles_after_comm_4 = 1; + } + + return 0; +} + +static int process_event(struct perf_evlist *evlist, union perf_event *event, + struct switch_tracking *switch_tracking) +{ + if (event->header.type == PERF_RECORD_SAMPLE) + return process_sample_event(evlist, event, switch_tracking); + + if (event->header.type == PERF_RECORD_COMM) { + int err, done = 0; + + err = check_comm(switch_tracking, event, "Test COMM 1", 0); + if (err < 0) + return -1; + done += err; + err = check_comm(switch_tracking, event, "Test COMM 2", 1); + if (err < 0) + return -1; + done += err; + err = check_comm(switch_tracking, event, "Test COMM 3", 2); + if (err < 0) + return -1; + done += err; + err = check_comm(switch_tracking, event, "Test COMM 4", 3); + if (err < 0) + return -1; + done += err; + if (done != 1) { + pr_debug("Unexpected comm event\n"); + return -1; + } + } + + return 0; +} + +struct event_node { + struct list_head list; + union perf_event *event; + u64 event_time; +}; + +static int add_event(struct perf_evlist *evlist, struct list_head *events, + union perf_event *event) +{ + struct perf_sample sample; + struct event_node *node; + + node = malloc(sizeof(struct event_node)); + if (!node) { + pr_debug("malloc failed\n"); + return -1; + } + node->event = event; + list_add(&node->list, events); + + if (perf_evlist__parse_sample(evlist, event, &sample)) { + pr_debug("perf_evlist__parse_sample failed\n"); + return -1; + } + + if (!sample.time) { + pr_debug("event with no time\n"); + return -1; + } + + node->event_time = sample.time; + + return 0; +} + +static void free_event_nodes(struct list_head *events) +{ + struct event_node *node; + + while (!list_empty(events)) { + node = list_entry(events->next, struct event_node, list); + list_del(&node->list); + free(node); + } +} + +static int compar(const void *a, const void *b) +{ + const struct event_node *nodea = a; + const struct event_node *nodeb = b; + s64 cmp = nodea->event_time - nodeb->event_time; + + return cmp; +} + +static int process_events(struct perf_evlist *evlist, + struct switch_tracking *switch_tracking) +{ + union perf_event *event; + unsigned pos, cnt = 0; + LIST_HEAD(events); + struct event_node *events_array, *node; + int i, ret; + + for (i = 0; i < evlist->nr_mmaps; i++) { + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + cnt += 1; + ret = add_event(evlist, &events, event); + perf_evlist__mmap_consume(evlist, i); + if (ret < 0) + goto out_free_nodes; + } + } + + events_array = calloc(cnt, sizeof(struct event_node)); + if (!events_array) { + pr_debug("calloc failed\n"); + ret = -1; + goto out_free_nodes; + } + + pos = 0; + list_for_each_entry(node, &events, list) + events_array[pos++] = *node; + + qsort(events_array, cnt, sizeof(struct event_node), compar); + + for (pos = 0; pos < cnt; pos++) { + ret = process_event(evlist, events_array[pos].event, + switch_tracking); + if (ret < 0) + goto out_free; + } + + ret = 0; +out_free: + pr_debug("%u events recorded\n", cnt); + free(events_array); 
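[process_events() above collects the mmap'd events into an array and qsort()s them by timestamp before replaying them. One caveat: compar() narrows the s64 difference of two u64 times to an int on return, which can misorder events whose timestamps differ by more than 31 bits of nanoseconds. A hypothetical overflow-safe variant, reusing the file's event_node type:

	/* Compare instead of subtracting, so neither unsigned wraparound
	 * nor int truncation can flip the sign of the result. */
	static int compar_safe(const void *a, const void *b)
	{
		u64 ta = ((const struct event_node *)a)->event_time;
		u64 tb = ((const struct event_node *)b)->event_time;

		return (ta > tb) - (ta < tb);
	}
]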
+out_free_nodes: + free_event_nodes(&events); + return ret; +} + +/** + * test__switch_tracking - test using sched_switch and tracking events. + * + * This function implements a test that checks that sched_switch events and + * tracking events can be recorded for a workload (current process) using the + * evsel->system_wide and evsel->tracking flags (respectively) with other events + * sometimes enabled or disabled. + */ +int test__switch_tracking(void) +{ + const char *sched_switch = "sched:sched_switch"; + struct switch_tracking switch_tracking = { .tids = NULL, }; + struct record_opts opts = { + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 4000, + .target = { + .uses_mmap = true, + }, + }; + struct thread_map *threads = NULL; + struct cpu_map *cpus = NULL; + struct perf_evlist *evlist = NULL; + struct perf_evsel *evsel, *cpu_clocks_evsel, *cycles_evsel; + struct perf_evsel *switch_evsel, *tracking_evsel; + const char *comm; + int err = -1; + + threads = thread_map__new(-1, getpid(), UINT_MAX); + if (!threads) { + pr_debug("thread_map__new failed!\n"); + goto out_err; + } + + cpus = cpu_map__new(NULL); + if (!cpus) { + pr_debug("cpu_map__new failed!\n"); + goto out_err; + } + + evlist = perf_evlist__new(); + if (!evlist) { + pr_debug("perf_evlist__new failed!\n"); + goto out_err; + } + + perf_evlist__set_maps(evlist, cpus, threads); + + /* First event */ + err = parse_events(evlist, "cpu-clock:u"); + if (err) { + pr_debug("Failed to parse event cpu-clock:u\n"); + goto out_err; + } + + cpu_clocks_evsel = perf_evlist__last(evlist); + + /* Second event */ + err = parse_events(evlist, "cycles:u"); + if (err) { + pr_debug("Failed to parse event cycles:u\n"); + goto out_err; + } + + cycles_evsel = perf_evlist__last(evlist); + + /* Third event */ + if (!perf_evlist__can_select_event(evlist, sched_switch)) { + fprintf(stderr, " (no sched_switch)"); + err = 0; + goto out; + } + + err = parse_events(evlist, sched_switch); + if (err) { + pr_debug("Failed to parse event %s\n", sched_switch); + goto out_err; + } + + switch_evsel = perf_evlist__last(evlist); + + perf_evsel__set_sample_bit(switch_evsel, CPU); + perf_evsel__set_sample_bit(switch_evsel, TIME); + + switch_evsel->system_wide = true; + switch_evsel->no_aux_samples = true; + switch_evsel->immediate = true; + + /* Test moving an event to the front */ + if (cycles_evsel == perf_evlist__first(evlist)) { + pr_debug("cycles event already at front\n"); + goto out_err; + } + perf_evlist__to_front(evlist, cycles_evsel); + if (cycles_evsel != perf_evlist__first(evlist)) { + pr_debug("Failed to move cycles event to front\n"); + goto out_err; + } + + perf_evsel__set_sample_bit(cycles_evsel, CPU); + perf_evsel__set_sample_bit(cycles_evsel, TIME); + + /* Fourth event */ + err = parse_events(evlist, "dummy:u"); + if (err) { + pr_debug("Failed to parse event dummy:u\n"); + goto out_err; + } + + tracking_evsel = perf_evlist__last(evlist); + + perf_evlist__set_tracking_event(evlist, tracking_evsel); + + tracking_evsel->attr.freq = 0; + tracking_evsel->attr.sample_period = 1; + + perf_evsel__set_sample_bit(tracking_evsel, TIME); + + /* Config events */ + perf_evlist__config(evlist, &opts); + + /* Check moved event is still at the front */ + if (cycles_evsel != perf_evlist__first(evlist)) { + pr_debug("Front event no longer at front\n"); + goto out_err; + } + + /* Check tracking event is tracking */ + if (!tracking_evsel->attr.mmap || !tracking_evsel->attr.comm) { + pr_debug("Tracking event not tracking\n"); + goto out_err; + 
} + + /* Check non-tracking events are not tracking */ + evlist__for_each(evlist, evsel) { + if (evsel != tracking_evsel) { + if (evsel->attr.mmap || evsel->attr.comm) { + pr_debug("Non-tracking event is tracking\n"); + goto out_err; + } + } + } + + if (perf_evlist__open(evlist) < 0) { + fprintf(stderr, " (not supported)"); + err = 0; + goto out; + } + + err = perf_evlist__mmap(evlist, UINT_MAX, false); + if (err) { + pr_debug("perf_evlist__mmap failed!\n"); + goto out_err; + } + + perf_evlist__enable(evlist); + + err = perf_evlist__disable_event(evlist, cpu_clocks_evsel); + if (err) { + pr_debug("perf_evlist__disable_event failed!\n"); + goto out_err; + } + + err = spin_sleep(); + if (err) { + pr_debug("spin_sleep failed!\n"); + goto out_err; + } + + comm = "Test COMM 1"; + err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0); + if (err) { + pr_debug("PR_SET_NAME failed!\n"); + goto out_err; + } + + err = perf_evlist__disable_event(evlist, cycles_evsel); + if (err) { + pr_debug("perf_evlist__disable_event failed!\n"); + goto out_err; + } + + comm = "Test COMM 2"; + err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0); + if (err) { + pr_debug("PR_SET_NAME failed!\n"); + goto out_err; + } + + err = spin_sleep(); + if (err) { + pr_debug("spin_sleep failed!\n"); + goto out_err; + } + + comm = "Test COMM 3"; + err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0); + if (err) { + pr_debug("PR_SET_NAME failed!\n"); + goto out_err; + } + + err = perf_evlist__enable_event(evlist, cycles_evsel); + if (err) { + pr_debug("perf_evlist__enable_event failed!\n"); + goto out_err; + } + + comm = "Test COMM 4"; + err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0); + if (err) { + pr_debug("PR_SET_NAME failed!\n"); + goto out_err; + } + + err = spin_sleep(); + if (err) { + pr_debug("spin_sleep failed!\n"); + goto out_err; + } + + perf_evlist__disable(evlist); + + switch_tracking.switch_evsel = switch_evsel; + switch_tracking.cycles_evsel = cycles_evsel; + + err = process_events(evlist, &switch_tracking); + + zfree(&switch_tracking.tids); + + if (err) + goto out_err; + + /* Check all 4 comm events were seen i.e. 
that evsel->tracking works */ + if (!switch_tracking.comm_seen[0] || !switch_tracking.comm_seen[1] || + !switch_tracking.comm_seen[2] || !switch_tracking.comm_seen[3]) { + pr_debug("Missing comm events\n"); + goto out_err; + } + + /* Check cycles event got enabled */ + if (!switch_tracking.cycles_before_comm_1) { + pr_debug("Missing cycles events\n"); + goto out_err; + } + + /* Check cycles event got disabled */ + if (switch_tracking.cycles_between_comm_2_and_comm_3) { + pr_debug("cycles events even though event was disabled\n"); + goto out_err; + } + + /* Check cycles event got enabled again */ + if (!switch_tracking.cycles_after_comm_4) { + pr_debug("Missing cycles events\n"); + goto out_err; + } +out: + if (evlist) { + perf_evlist__disable(evlist); + perf_evlist__delete(evlist); + } else { + cpu_map__delete(cpus); + thread_map__delete(threads); + } + + return err; + +out_err: + err = -1; + goto out; +} diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 5ff3db318f12..87522f01c7ad 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -42,6 +42,7 @@ int test__task_exit(void) .uses_mmap = true, }; const char *argv[] = { "true", NULL }; + char sbuf[STRERR_BUFSIZE]; signal(SIGCHLD, sig_handler); @@ -82,13 +83,14 @@ int test__task_exit(void) err = perf_evlist__open(evlist); if (err < 0) { - pr_debug("Couldn't open the evlist: %s\n", strerror(-err)); + pr_debug("Couldn't open the evlist: %s\n", + strerror_r(-err, sbuf, sizeof(sbuf))); goto out_delete_evlist; } if (perf_evlist__mmap(evlist, 128, true) < 0) { pr_debug("failed to mmap events: %d (%s)\n", errno, - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); goto out_delete_evlist; } diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index ed64790a395f..be8be10e3957 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -48,6 +48,7 @@ int test__mmap_thread_lookup(void); int test__thread_mg_share(void); int test__hists_output(void); int test__hists_cumulate(void); +int test__switch_tracking(void); #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) #ifdef HAVE_DWARF_UNWIND_SUPPORT diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index a94b11fc5e00..d4cef68176da 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -10,6 +10,7 @@ #include "../../util/pstack.h" #include "../../util/sort.h" #include "../../util/util.h" +#include "../../util/top.h" #include "../../arch/common.h" #include "../browser.h" @@ -228,8 +229,10 @@ static void callchain_node__init_have_children(struct callchain_node *node) { struct callchain_list *chain; - list_for_each_entry(chain, &node->val, list) + if (!list_empty(&node->val)) { + chain = list_entry(node->val.prev, struct callchain_list, list); chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root); + } callchain_node__init_have_children_rb_tree(node); } @@ -474,26 +477,87 @@ static char *callchain_list__sym_name(struct callchain_list *cl, return bf; } +struct callchain_print_arg { + /* for hists browser */ + off_t row_offset; + bool is_current_entry; + + /* for file dump */ + FILE *fp; + int printed; +}; + +typedef void (*print_callchain_entry_fn)(struct hist_browser *browser, + struct callchain_list *chain, + const char *str, int offset, + unsigned short row, + struct callchain_print_arg *arg); + +static void hist_browser__show_callchain_entry(struct hist_browser *browser, + struct callchain_list *chain, + const char *str, int offset, + unsigned short 
row, + struct callchain_print_arg *arg) +{ + int color, width; + char folded_sign = callchain_list__folded(chain); + + color = HE_COLORSET_NORMAL; + width = browser->b.width - (offset + 2); + if (ui_browser__is_current_entry(&browser->b, row)) { + browser->selection = &chain->ms; + color = HE_COLORSET_SELECTED; + arg->is_current_entry = true; + } + + ui_browser__set_color(&browser->b, color); + hist_browser__gotorc(browser, row, 0); + slsmg_write_nstring(" ", offset); + slsmg_printf("%c ", folded_sign); + slsmg_write_nstring(str, width); +} + +static void hist_browser__fprintf_callchain_entry(struct hist_browser *b __maybe_unused, + struct callchain_list *chain, + const char *str, int offset, + unsigned short row __maybe_unused, + struct callchain_print_arg *arg) +{ + char folded_sign = callchain_list__folded(chain); + + arg->printed += fprintf(arg->fp, "%*s%c %s\n", offset, " ", + folded_sign, str); +} + +typedef bool (*check_output_full_fn)(struct hist_browser *browser, + unsigned short row); + +static bool hist_browser__check_output_full(struct hist_browser *browser, + unsigned short row) +{ + return browser->b.rows == row; +} + +static bool hist_browser__check_dump_full(struct hist_browser *browser __maybe_unused, + unsigned short row __maybe_unused) +{ + return false; +} + #define LEVEL_OFFSET_STEP 3 -static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser, - struct callchain_node *chain_node, - u64 total, int level, - unsigned short row, - off_t *row_offset, - bool *is_current_entry) +static int hist_browser__show_callchain(struct hist_browser *browser, + struct rb_root *root, int level, + unsigned short row, u64 total, + print_callchain_entry_fn print, + struct callchain_print_arg *arg, + check_output_full_fn is_output_full) { struct rb_node *node; - int first_row = row, width, offset = level * LEVEL_OFFSET_STEP; - u64 new_total, remaining; - - if (callchain_param.mode == CHAIN_GRAPH_REL) - new_total = chain_node->children_hit; - else - new_total = total; + int first_row = row, offset = level * LEVEL_OFFSET_STEP; + u64 new_total; - remaining = new_total; - node = rb_first(&chain_node->rb_root); + node = rb_first(root); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); struct rb_node *next = rb_next(node); @@ -503,30 +567,28 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse int first = true; int extra_offset = 0; - remaining -= cumul; - list_for_each_entry(chain, &child->val, list) { char bf[1024], *alloc_str; const char *str; - int color; bool was_first = first; if (first) first = false; - else + else if (level > 1) extra_offset = LEVEL_OFFSET_STEP; folded_sign = callchain_list__folded(chain); - if (*row_offset != 0) { - --*row_offset; + if (arg->row_offset != 0) { + arg->row_offset--; goto do_next; } alloc_str = NULL; str = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); - if (was_first) { - double percent = cumul * 100.0 / new_total; + + if (was_first && level > 1) { + double percent = cumul * 100.0 / total; if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) str = "Not enough memory!"; @@ -534,22 +596,11 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse str = alloc_str; } - color = HE_COLORSET_NORMAL; - width = browser->b.width - (offset + extra_offset + 2); - if (ui_browser__is_current_entry(&browser->b, row)) { - browser->selection = &chain->ms; - color = HE_COLORSET_SELECTED; - *is_current_entry = true; - } + 
print(browser, chain, str, offset + extra_offset, row, arg); - ui_browser__set_color(&browser->b, color); - hist_browser__gotorc(browser, row, 0); - slsmg_write_nstring(" ", offset + extra_offset); - slsmg_printf("%c ", folded_sign); - slsmg_write_nstring(str, width); free(alloc_str); - if (++row == browser->b.rows) + if (is_output_full(browser, ++row)) goto out; do_next: if (folded_sign == '+') @@ -558,89 +609,21 @@ do_next: if (folded_sign == '-') { const int new_level = level + (extra_offset ? 2 : 1); - row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total, - new_level, row, row_offset, - is_current_entry); - } - if (row == browser->b.rows) - goto out; - node = next; - } -out: - return row - first_row; -} -static int hist_browser__show_callchain_node(struct hist_browser *browser, - struct callchain_node *node, - int level, unsigned short row, - off_t *row_offset, - bool *is_current_entry) -{ - struct callchain_list *chain; - int first_row = row, - offset = level * LEVEL_OFFSET_STEP, - width = browser->b.width - offset; - char folded_sign = ' '; - - list_for_each_entry(chain, &node->val, list) { - char bf[1024], *s; - int color; - - folded_sign = callchain_list__folded(chain); - - if (*row_offset != 0) { - --*row_offset; - continue; - } + if (callchain_param.mode == CHAIN_GRAPH_REL) + new_total = child->children_hit; + else + new_total = total; - color = HE_COLORSET_NORMAL; - if (ui_browser__is_current_entry(&browser->b, row)) { - browser->selection = &chain->ms; - color = HE_COLORSET_SELECTED; - *is_current_entry = true; + row += hist_browser__show_callchain(browser, &child->rb_root, + new_level, row, new_total, + print, arg, is_output_full); } - - s = callchain_list__sym_name(chain, bf, sizeof(bf), - browser->show_dso); - hist_browser__gotorc(browser, row, 0); - ui_browser__set_color(&browser->b, color); - slsmg_write_nstring(" ", offset); - slsmg_printf("%c ", folded_sign); - slsmg_write_nstring(s, width - 2); - - if (++row == browser->b.rows) - goto out; - } - - if (folded_sign == '-') - row += hist_browser__show_callchain_node_rb_tree(browser, node, - browser->hists->stats.total_period, - level + 1, row, - row_offset, - is_current_entry); -out: - return row - first_row; -} - -static int hist_browser__show_callchain(struct hist_browser *browser, - struct rb_root *chain, - int level, unsigned short row, - off_t *row_offset, - bool *is_current_entry) -{ - struct rb_node *nd; - int first_row = row; - - for (nd = rb_first(chain); nd; nd = rb_next(nd)) { - struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); - - row += hist_browser__show_callchain_node(browser, node, level, - row, row_offset, - is_current_entry); - if (row == browser->b.rows) + if (is_output_full(browser, row)) break; + node = next; } - +out: return row - first_row; } @@ -653,17 +636,18 @@ struct hpp_arg { static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...) 
{ struct hpp_arg *arg = hpp->ptr; - int ret; + int ret, len; va_list args; double percent; va_start(args, fmt); + len = va_arg(args, int); percent = va_arg(args, double); va_end(args); ui_browser__set_percent_color(arg->b, percent, arg->current_entry); - ret = scnprintf(hpp->buf, hpp->size, fmt, percent); + ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent); slsmg_printf("%s", hpp->buf); advance_hpp(hpp, ret); @@ -677,12 +661,12 @@ static u64 __hpp_get_##_field(struct hist_entry *he) \ } \ \ static int \ -hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\ +hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ - return __hpp__fmt(hpp, he, __hpp_get_##_field, " %6.2f%%", \ - __hpp__slsmg_color_printf, true); \ + return hpp__fmt(fmt, hpp, he, __hpp_get_##_field, " %*.2f%%", \ + __hpp__slsmg_color_printf, true); \ } #define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ @@ -692,18 +676,20 @@ static u64 __hpp_get_acc_##_field(struct hist_entry *he) \ } \ \ static int \ -hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\ +hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ if (!symbol_conf.cumulate_callchain) { \ - int ret = scnprintf(hpp->buf, hpp->size, "%8s", "N/A"); \ + int len = fmt->user_len ?: fmt->len; \ + int ret = scnprintf(hpp->buf, hpp->size, \ + "%*s", len, "N/A"); \ slsmg_printf("%s", hpp->buf); \ \ return ret; \ } \ - return __hpp__fmt(hpp, he, __hpp_get_acc_##_field, " %6.2f%%", \ - __hpp__slsmg_color_printf, true); \ + return hpp__fmt(fmt, hpp, he, __hpp_get_acc_##_field, \ + " %*.2f%%", __hpp__slsmg_color_printf, true); \ } __HPP_COLOR_PERCENT_FN(overhead, period) @@ -812,10 +798,21 @@ static int hist_browser__show_entry(struct hist_browser *browser, --row_offset; if (folded_sign == '-' && row != browser->b.rows) { - printed += hist_browser__show_callchain(browser, &entry->sorted_chain, - 1, row, &row_offset, - ¤t_entry); - if (current_entry) + u64 total = hists__total_period(entry->hists); + struct callchain_print_arg arg = { + .row_offset = row_offset, + .is_current_entry = current_entry, + }; + + if (symbol_conf.cumulate_callchain) + total = entry->stat_acc->period; + + printed += hist_browser__show_callchain(browser, + &entry->sorted_chain, 1, row, total, + hist_browser__show_callchain_entry, &arg, + hist_browser__check_output_full); + + if (arg.is_current_entry) browser->he_selection = entry; } @@ -847,9 +844,6 @@ static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists) if (perf_hpp__should_skip(fmt)) continue; - /* We need to add the length of the columns header. 
*/ - perf_hpp__reset_width(fmt, hists); - ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists)); if (advance_hpp_check(&dummy_hpp, ret)) break; @@ -1074,113 +1068,21 @@ do_offset: } } -static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser, - struct callchain_node *chain_node, - u64 total, int level, - FILE *fp) -{ - struct rb_node *node; - int offset = level * LEVEL_OFFSET_STEP; - u64 new_total, remaining; - int printed = 0; - - if (callchain_param.mode == CHAIN_GRAPH_REL) - new_total = chain_node->children_hit; - else - new_total = total; - - remaining = new_total; - node = rb_first(&chain_node->rb_root); - while (node) { - struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); - struct rb_node *next = rb_next(node); - u64 cumul = callchain_cumul_hits(child); - struct callchain_list *chain; - char folded_sign = ' '; - int first = true; - int extra_offset = 0; - - remaining -= cumul; - - list_for_each_entry(chain, &child->val, list) { - char bf[1024], *alloc_str; - const char *str; - bool was_first = first; - - if (first) - first = false; - else - extra_offset = LEVEL_OFFSET_STEP; - - folded_sign = callchain_list__folded(chain); - - alloc_str = NULL; - str = callchain_list__sym_name(chain, bf, sizeof(bf), - browser->show_dso); - if (was_first) { - double percent = cumul * 100.0 / new_total; - - if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) - str = "Not enough memory!"; - else - str = alloc_str; - } - - printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str); - free(alloc_str); - if (folded_sign == '+') - break; - } - - if (folded_sign == '-') { - const int new_level = level + (extra_offset ? 2 : 1); - printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total, - new_level, fp); - } - - node = next; - } - - return printed; -} - -static int hist_browser__fprintf_callchain_node(struct hist_browser *browser, - struct callchain_node *node, - int level, FILE *fp) -{ - struct callchain_list *chain; - int offset = level * LEVEL_OFFSET_STEP; - char folded_sign = ' '; - int printed = 0; - - list_for_each_entry(chain, &node->val, list) { - char bf[1024], *s; - - folded_sign = callchain_list__folded(chain); - s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); - printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s); - } - - if (folded_sign == '-') - printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node, - browser->hists->stats.total_period, - level + 1, fp); - return printed; -} - static int hist_browser__fprintf_callchain(struct hist_browser *browser, - struct rb_root *chain, int level, FILE *fp) + struct hist_entry *he, FILE *fp) { - struct rb_node *nd; - int printed = 0; + u64 total = hists__total_period(he->hists); + struct callchain_print_arg arg = { + .fp = fp, + }; - for (nd = rb_first(chain); nd; nd = rb_next(nd)) { - struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); + if (symbol_conf.cumulate_callchain) + total = he->stat_acc->period; - printed += hist_browser__fprintf_callchain_node(browser, node, level, fp); - } - - return printed; + hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total, + hist_browser__fprintf_callchain_entry, &arg, + hist_browser__check_dump_full); + return arg.printed; } static int hist_browser__fprintf_entry(struct hist_browser *browser, @@ -1219,7 +1121,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser, printed += fprintf(fp, "%s\n", 
rtrim(s)); if (folded_sign == '-') - printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp); + printed += hist_browser__fprintf_callchain(browser, he, fp); return printed; } @@ -1498,6 +1400,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, char buf[64]; char script_opt[64]; int delay_secs = hbt ? hbt->refresh : 0; + struct perf_hpp_fmt *fmt; #define HIST_BROWSER_HELP_COMMON \ "h/?/F1 Show this window\n" \ @@ -1529,6 +1432,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, "P Print histograms to perf.hist.N\n" "t Zoom into current Thread\n" "V Verbose (DSO names in callchains, etc)\n" + "z Toggle zeroing of samples\n" "/ Filter symbol by name"; if (browser == NULL) @@ -1547,6 +1451,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, memset(options, 0, sizeof(options)); + perf_hpp__for_each_format(fmt) + perf_hpp__reset_width(fmt, hists); + + if (symbol_conf.col_width_list_str) + perf_hpp__set_user_width(symbol_conf.col_width_list_str); + while (1) { const struct thread *thread = NULL; const struct dso *dso = NULL; @@ -1623,6 +1533,13 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, case 'F': symbol_conf.filter_relative ^= 1; continue; + case 'z': + if (!is_report_browser(hbt)) { + struct perf_top *top = hbt->arg; + + top->zero = !top->zero; + } + continue; case K_F1: case 'h': case '?': diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 6ca60e482cdc..f3fa4258b256 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -11,6 +11,7 @@ static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...) { int ret = 0; + int len; va_list args; double percent; const char *markup; @@ -18,6 +19,7 @@ static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...) size_t size = hpp->size; va_start(args, fmt); + len = va_arg(args, int); percent = va_arg(args, double); va_end(args); @@ -25,7 +27,7 @@ static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...) 
if (markup) ret += scnprintf(buf, size, markup); - ret += scnprintf(buf + ret, size - ret, fmt, percent); + ret += scnprintf(buf + ret, size - ret, fmt, len, percent); if (markup) ret += scnprintf(buf + ret, size - ret, "</span>"); @@ -39,12 +41,12 @@ static u64 he_get_##_field(struct hist_entry *he) \ return he->stat._field; \ } \ \ -static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ +static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ - return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ - __percent_color_snprintf, true); \ + return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ + __percent_color_snprintf, true); \ } #define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ @@ -57,8 +59,8 @@ static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ - return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \ - __percent_color_snprintf, true); \ + return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ + __percent_color_snprintf, true); \ } __HPP_COLOR_PERCENT_FN(overhead, period) @@ -205,10 +207,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists, if (perf_hpp__is_sort_entry(fmt)) sym_col = col_idx; - fmt->header(fmt, &hpp, hists_to_evsel(hists)); - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, ltrim(s), + -1, fmt->name, renderer, "markup", col_idx++, NULL); } diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 498adb23c02e..2af18376b077 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -15,9 +15,9 @@ __ret; \ }) -int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, - hpp_field_fn get_field, const char *fmt, - hpp_snprint_fn print_fn, bool fmt_percent) +static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, int len, + hpp_snprint_fn print_fn, bool fmt_percent) { int ret; struct hists *hists = he->hists; @@ -32,9 +32,9 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, if (total) percent = 100.0 * get_field(he) / total; - ret = hpp__call_print_fn(hpp, print_fn, fmt, percent); + ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent); } else - ret = hpp__call_print_fn(hpp, print_fn, fmt, get_field(he)); + ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he)); if (perf_evsel__is_group_event(evsel)) { int prev_idx, idx_delta; @@ -60,19 +60,19 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, */ if (fmt_percent) { ret += hpp__call_print_fn(hpp, print_fn, - fmt, 0.0); + fmt, len, 0.0); } else { ret += hpp__call_print_fn(hpp, print_fn, - fmt, 0ULL); + fmt, len, 0ULL); } } if (fmt_percent) { - ret += hpp__call_print_fn(hpp, print_fn, fmt, + ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 100.0 * period / total); } else { ret += hpp__call_print_fn(hpp, print_fn, fmt, - period); + len, period); } prev_idx = perf_evsel__group_idx(evsel); @@ -86,10 +86,10 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, */ if (fmt_percent) { ret += hpp__call_print_fn(hpp, print_fn, - fmt, 0.0); + fmt, len, 0.0); } else { ret += hpp__call_print_fn(hpp, print_fn, - fmt, 0ULL); + fmt, len, 0ULL); } } } @@ -104,16 +104,35 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, return ret; } -int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he, - hpp_field_fn get_field, const char *fmt, - hpp_snprint_fn print_fn, 
bool fmt_percent) +int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he, hpp_field_fn get_field, + const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) +{ + int len = fmt->user_len ?: fmt->len; + + if (symbol_conf.field_sep) { + return __hpp__fmt(hpp, he, get_field, fmtstr, 1, + print_fn, fmt_percent); + } + + if (fmt_percent) + len -= 2; /* 2 for a space and a % sign */ + else + len -= 1; + + return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent); +} + +int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he, hpp_field_fn get_field, + const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) { if (!symbol_conf.cumulate_callchain) { - return snprintf(hpp->buf, hpp->size, "%*s", - fmt_percent ? 8 : 12, "N/A"); + int len = fmt->user_len ?: fmt->len; + return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A"); } - return __hpp__fmt(hpp, he, get_field, fmt, print_fn, fmt_percent); + return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent); } static int field_cmp(u64 field_a, u64 field_b) @@ -190,30 +209,26 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, return ret; } -#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ -static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ - struct perf_hpp *hpp, \ - struct perf_evsel *evsel) \ -{ \ - int len = _min_width; \ - \ - if (symbol_conf.event_group) \ - len = max(len, evsel->nr_members * _unit_width); \ - \ - return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \ -} - -#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \ -static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ - struct perf_hpp *hpp __maybe_unused, \ - struct perf_evsel *evsel) \ -{ \ - int len = _min_width; \ - \ - if (symbol_conf.event_group) \ - len = max(len, evsel->nr_members * _unit_width); \ - \ - return len; \ +static int hpp__width_fn(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp __maybe_unused, + struct perf_evsel *evsel) +{ + int len = fmt->user_len ?: fmt->len; + + if (symbol_conf.event_group) + len = max(len, evsel->nr_members * fmt->len); + + if (len < (int)strlen(fmt->name)) + len = strlen(fmt->name); + + return len; +} + +static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel) +{ + int len = hpp__width_fn(fmt, hpp, evsel); + return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name); } static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) @@ -221,11 +236,12 @@ static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) va_list args; ssize_t ssize = hpp->size; double percent; - int ret; + int ret, len; va_start(args, fmt); + len = va_arg(args, int); percent = va_arg(args, double); - ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent); + ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent); va_end(args); return (ret >= ssize) ? 
(ssize - 1) : ret; @@ -250,20 +266,19 @@ static u64 he_get_##_field(struct hist_entry *he) \ return he->stat._field; \ } \ \ -static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ +static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ - return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ - hpp_color_scnprintf, true); \ + return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ + hpp_color_scnprintf, true); \ } #define __HPP_ENTRY_PERCENT_FN(_type, _field) \ -static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ +static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ - const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \ - return __hpp__fmt(hpp, he, he_get_##_field, fmt, \ - hpp_entry_scnprintf, true); \ + return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ + hpp_entry_scnprintf, true); \ } #define __HPP_SORT_FN(_type, _field) \ @@ -278,20 +293,19 @@ static u64 he_get_acc_##_field(struct hist_entry *he) \ return he->stat_acc->_field; \ } \ \ -static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ +static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ - return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \ - hpp_color_scnprintf, true); \ + return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ + hpp_color_scnprintf, true); \ } #define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ -static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ +static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ - const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \ - return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, fmt, \ - hpp_entry_scnprintf, true); \ + return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ + hpp_entry_scnprintf, true); \ } #define __HPP_SORT_ACC_FN(_type, _field) \ @@ -306,12 +320,11 @@ static u64 he_get_raw_##_field(struct hist_entry *he) \ return he->stat._field; \ } \ \ -static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ +static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ - const char *fmt = symbol_conf.field_sep ? 
" %"PRIu64 : " %11"PRIu64; \ - return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, \ - hpp_entry_scnprintf, false); \ + return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \ + hpp_entry_scnprintf, false); \ } #define __HPP_SORT_RAW_FN(_type, _field) \ @@ -321,37 +334,29 @@ static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ } -#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \ -__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ -__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +#define HPP_PERCENT_FNS(_type, _field) \ __HPP_COLOR_PERCENT_FN(_type, _field) \ __HPP_ENTRY_PERCENT_FN(_type, _field) \ __HPP_SORT_FN(_type, _field) -#define HPP_PERCENT_ACC_FNS(_type, _str, _field, _min_width, _unit_width)\ -__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ -__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +#define HPP_PERCENT_ACC_FNS(_type, _field) \ __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ __HPP_SORT_ACC_FN(_type, _field) -#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \ -__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ -__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +#define HPP_RAW_FNS(_type, _field) \ __HPP_ENTRY_RAW_FN(_type, _field) \ __HPP_SORT_RAW_FN(_type, _field) -__HPP_HEADER_FN(overhead_self, "Self", 8, 8) - -HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8) -HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8) -HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8) -HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8) -HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8) -HPP_PERCENT_ACC_FNS(overhead_acc, "Children", period, 8, 8) +HPP_PERCENT_FNS(overhead, period) +HPP_PERCENT_FNS(overhead_sys, period_sys) +HPP_PERCENT_FNS(overhead_us, period_us) +HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys) +HPP_PERCENT_FNS(overhead_guest_us, period_guest_us) +HPP_PERCENT_ACC_FNS(overhead_acc, period) -HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12) -HPP_RAW_FNS(period, "Period", period, 12, 12) +HPP_RAW_FNS(samples, nr_events) +HPP_RAW_FNS(period, period) static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, struct hist_entry *b __maybe_unused) @@ -359,47 +364,50 @@ static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, return 0; } -#define HPP__COLOR_PRINT_FNS(_name) \ +#define HPP__COLOR_PRINT_FNS(_name, _fn) \ { \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .color = hpp__color_ ## _name, \ - .entry = hpp__entry_ ## _name, \ + .name = _name, \ + .header = hpp__header_fn, \ + .width = hpp__width_fn, \ + .color = hpp__color_ ## _fn, \ + .entry = hpp__entry_ ## _fn, \ .cmp = hpp__nop_cmp, \ .collapse = hpp__nop_cmp, \ - .sort = hpp__sort_ ## _name, \ + .sort = hpp__sort_ ## _fn, \ } -#define HPP__COLOR_ACC_PRINT_FNS(_name) \ +#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn) \ { \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .color = hpp__color_ ## _name, \ - .entry = hpp__entry_ ## _name, \ + .name = _name, \ + .header = hpp__header_fn, \ + .width = hpp__width_fn, \ + .color = hpp__color_ ## _fn, \ + .entry = hpp__entry_ ## _fn, \ .cmp = hpp__nop_cmp, \ .collapse = hpp__nop_cmp, \ - .sort = hpp__sort_ ## _name, \ + .sort = hpp__sort_ ## _fn, \ } -#define HPP__PRINT_FNS(_name) \ +#define HPP__PRINT_FNS(_name, _fn) \ { \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .entry = hpp__entry_ ## _name, \ + 
.name = _name, \ + .header = hpp__header_fn, \ + .width = hpp__width_fn, \ + .entry = hpp__entry_ ## _fn, \ .cmp = hpp__nop_cmp, \ .collapse = hpp__nop_cmp, \ - .sort = hpp__sort_ ## _name, \ + .sort = hpp__sort_ ## _fn, \ } struct perf_hpp_fmt perf_hpp__format[] = { - HPP__COLOR_PRINT_FNS(overhead), - HPP__COLOR_PRINT_FNS(overhead_sys), - HPP__COLOR_PRINT_FNS(overhead_us), - HPP__COLOR_PRINT_FNS(overhead_guest_sys), - HPP__COLOR_PRINT_FNS(overhead_guest_us), - HPP__COLOR_ACC_PRINT_FNS(overhead_acc), - HPP__PRINT_FNS(samples), - HPP__PRINT_FNS(period) + HPP__COLOR_PRINT_FNS("Overhead", overhead), + HPP__COLOR_PRINT_FNS("sys", overhead_sys), + HPP__COLOR_PRINT_FNS("usr", overhead_us), + HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys), + HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us), + HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc), + HPP__PRINT_FNS("Samples", samples), + HPP__PRINT_FNS("Period", period) }; LIST_HEAD(perf_hpp__list); @@ -444,14 +452,12 @@ void perf_hpp__init(void) /* * If user specified field order, no need to setup default fields. */ - if (field_order) + if (is_strict_order(field_order)) return; if (symbol_conf.cumulate_callchain) { perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC); - - perf_hpp__format[PERF_HPP__OVERHEAD].header = - hpp__header_overhead_self; + perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self"; } perf_hpp__column_enable(PERF_HPP__OVERHEAD); @@ -513,11 +519,11 @@ void perf_hpp__column_disable(unsigned col) void perf_hpp__cancel_cumulate(void) { - if (field_order) + if (is_strict_order(field_order)) return; perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC); - perf_hpp__format[PERF_HPP__OVERHEAD].header = hpp__header_overhead; + perf_hpp__format[PERF_HPP__OVERHEAD].name = "Overhead"; } void perf_hpp__setup_output_field(void) @@ -622,3 +628,59 @@ unsigned int hists__sort_list_width(struct hists *hists) return ret; } + +void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) +{ + int idx; + + if (perf_hpp__is_sort_entry(fmt)) + return perf_hpp__reset_sort_width(fmt, hists); + + for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { + if (fmt == &perf_hpp__format[idx]) + break; + } + + if (idx == PERF_HPP__MAX_INDEX) + return; + + switch (idx) { + case PERF_HPP__OVERHEAD: + case PERF_HPP__OVERHEAD_SYS: + case PERF_HPP__OVERHEAD_US: + case PERF_HPP__OVERHEAD_ACC: + fmt->len = 8; + break; + + case PERF_HPP__OVERHEAD_GUEST_SYS: + case PERF_HPP__OVERHEAD_GUEST_US: + fmt->len = 9; + break; + + case PERF_HPP__SAMPLES: + case PERF_HPP__PERIOD: + fmt->len = 12; + break; + + default: + break; + } +} + +void perf_hpp__set_user_width(const char *width_list_str) +{ + struct perf_hpp_fmt *fmt; + const char *ptr = width_list_str; + + perf_hpp__for_each_format(fmt) { + char *p; + + int len = strtol(ptr, &p, 10); + fmt->user_len = len; + + if (*p == ',') + ptr = p + 1; + else + break; + } +} diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 40af0acb4fe9..15b451acbde6 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -395,10 +395,12 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, init_rem_hits(); - perf_hpp__for_each_format(fmt) perf_hpp__reset_width(fmt, hists); + if (symbol_conf.col_width_list_str) + perf_hpp__set_user_width(symbol_conf.col_width_list_str); + if (!show_header) goto print_entries; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 809b4c50beae..36437527dbb3 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c 
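The hist column rework just shown replaces fixed-width formats such as " %6.2f%%" with " %*.2f%%", passing the column width as an extra int argument, and it selects that width with GCC's binary `?:` extension (`fmt->user_len ?: fmt->len`, i.e. the user override when non-zero). A minimal standalone sketch of both idioms, with illustrative values:

```c
#include <stdio.h>

int main(void)
{
	int user_len = 0, default_len = 8;
	/* GCC's a ?: b yields a when a is non-zero, else b -- how
	 * hpp__fmt() prefers a user-supplied column width. */
	int len = user_len ?: default_len;
	double percent = 42.5;

	/* '*' pulls the field width from the argument list, so one
	 * format string serves every column width. */
	printf("[%*.2f%%]\n", len, percent);	/* prints [   42.50%] */
	return 0;
}
```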
@@ -232,9 +232,16 @@ static int mov__parse(struct ins_operands *ops) return -1; target = ++s; + comment = strchr(s, '#'); - while (s[0] != '\0' && !isspace(s[0])) - ++s; + if (comment != NULL) + s = comment - 1; + else + s = strchr(s, '\0') - 1; + + while (s > target && isspace(s[0])) + --s; + s++; prev = *s; *s = '\0'; @@ -244,7 +251,6 @@ static int mov__parse(struct ins_operands *ops) if (ops->target.raw == NULL) goto out_free_source; - comment = strchr(s, '#'); if (comment == NULL) return 0; @@ -899,10 +905,8 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) struct kcore_extract kce; bool delete_extract = false; - if (filename) { - snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", - symbol_conf.symfs, filename); - } + if (filename) + symbol__join_symfs(symfs_filename, filename); if (filename == NULL) { if (dso->has_build_id) { @@ -922,8 +926,7 @@ fallback: * DSO is the same as when 'perf record' ran. */ filename = (char *)dso->long_name; - snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", - symbol_conf.symfs, filename); + symbol__join_symfs(symfs_filename, filename); free_filename = false; } diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 7b176dd02e1a..5cf9e1b5989d 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -22,6 +22,7 @@ typedef int (*config_fn_t)(const char *, const char *, void *); extern int perf_default_config(const char *, const char *, void *); extern int perf_config(config_fn_t fn, void *); extern int perf_config_int(const char *, const char *); +extern u64 perf_config_u64(const char *, const char *); extern int perf_config_bool(const char *, const char *); extern int config_error_nonbool(const char *); extern const char *perf_config_dirname(const char *, const char *); diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 437ee09727e6..08f0fbf5527c 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -28,74 +28,55 @@ __thread struct callchain_cursor callchain_cursor; int parse_callchain_report_opt(const char *arg) { - char *tok, *tok2; + char *tok; char *endptr; + bool minpcnt_set = false; symbol_conf.use_callchain = true; if (!arg) return 0; - tok = strtok((char *)arg, ","); - if (!tok) - return -1; - - /* get the output mode */ - if (!strncmp(tok, "graph", strlen(arg))) { - callchain_param.mode = CHAIN_GRAPH_ABS; - - } else if (!strncmp(tok, "flat", strlen(arg))) { - callchain_param.mode = CHAIN_FLAT; - } else if (!strncmp(tok, "fractal", strlen(arg))) { - callchain_param.mode = CHAIN_GRAPH_REL; - } else if (!strncmp(tok, "none", strlen(arg))) { - callchain_param.mode = CHAIN_NONE; - symbol_conf.use_callchain = false; - return 0; - } else { - return -1; - } - - /* get the min percentage */ - tok = strtok(NULL, ","); - if (!tok) - goto setup; - - callchain_param.min_percent = strtod(tok, &endptr); - if (tok == endptr) - return -1; + while ((tok = strtok((char *)arg, ",")) != NULL) { + if (!strncmp(tok, "none", strlen(tok))) { + callchain_param.mode = CHAIN_NONE; + symbol_conf.use_callchain = false; + return 0; + } - /* get the print limit */ - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; + /* try to get the output mode */ + if (!strncmp(tok, "graph", strlen(tok))) + callchain_param.mode = CHAIN_GRAPH_ABS; + else if (!strncmp(tok, "flat", strlen(tok))) + callchain_param.mode = CHAIN_FLAT; + else if (!strncmp(tok, "fractal", strlen(tok))) + callchain_param.mode = CHAIN_GRAPH_REL; + /* try to get the call chain order */ + else if 
(!strncmp(tok, "caller", strlen(tok))) + callchain_param.order = ORDER_CALLER; + else if (!strncmp(tok, "callee", strlen(tok))) + callchain_param.order = ORDER_CALLEE; + /* try to get the sort key */ + else if (!strncmp(tok, "function", strlen(tok))) + callchain_param.key = CCKEY_FUNCTION; + else if (!strncmp(tok, "address", strlen(tok))) + callchain_param.key = CCKEY_ADDRESS; + /* try to get the min percent */ + else if (!minpcnt_set) { + callchain_param.min_percent = strtod(tok, &endptr); + if (tok == endptr) + return -1; + minpcnt_set = true; + } else { + /* try print limit at last */ + callchain_param.print_limit = strtoul(tok, &endptr, 0); + if (tok == endptr) + return -1; + } - if (tok2[0] != 'c') { - callchain_param.print_limit = strtoul(tok2, &endptr, 0); - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; + arg = NULL; } - /* get the call chain order */ - if (!strncmp(tok2, "caller", strlen("caller"))) - callchain_param.order = ORDER_CALLER; - else if (!strncmp(tok2, "callee", strlen("callee"))) - callchain_param.order = ORDER_CALLEE; - else - return -1; - - /* Get the sort key */ - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; - if (!strncmp(tok2, "function", strlen("function"))) - callchain_param.key = CCKEY_FUNCTION; - else if (!strncmp(tok2, "address", strlen("address"))) - callchain_param.key = CCKEY_ADDRESS; - else - return -1; -setup: if (callchain_register_param(&callchain_param) < 0) { pr_err("Can't register callchain params\n"); return -1; diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c index c5d05ec17220..47b78b3f0325 100644 --- a/tools/perf/util/cloexec.c +++ b/tools/perf/util/cloexec.c @@ -1,7 +1,9 @@ +#include <sched.h> #include "util.h" #include "../perf.h" #include "cloexec.h" #include "asm/bug.h" +#include "debug.h" static unsigned long flag = PERF_FLAG_FD_CLOEXEC; @@ -9,15 +11,30 @@ static int perf_flag_probe(void) { /* use 'safest' configuration as used in perf_evsel__fallback() */ struct perf_event_attr attr = { - .type = PERF_COUNT_SW_CPU_CLOCK, + .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_CLOCK, + .exclude_kernel = 1, }; int fd; int err; + int cpu; + pid_t pid = -1; + char sbuf[STRERR_BUFSIZE]; - /* check cloexec flag */ - fd = sys_perf_event_open(&attr, 0, -1, -1, - PERF_FLAG_FD_CLOEXEC); + cpu = sched_getcpu(); + if (cpu < 0) + cpu = 0; + + while (1) { + /* check cloexec flag */ + fd = sys_perf_event_open(&attr, pid, cpu, -1, + PERF_FLAG_FD_CLOEXEC); + if (fd < 0 && pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + break; + } err = errno; if (fd >= 0) { @@ -25,17 +42,17 @@ static int perf_flag_probe(void) return 1; } - WARN_ONCE(err != EINVAL, + WARN_ONCE(err != EINVAL && err != EBUSY, "perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n", - err, strerror(err)); + err, strerror_r(err, sbuf, sizeof(sbuf))); /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ - fd = sys_perf_event_open(&attr, 0, -1, -1, 0); + fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); err = errno; - if (WARN_ONCE(fd < 0, + if (WARN_ONCE(fd < 0 && err != EBUSY, "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", - err, strerror(err))) + err, strerror_r(err, sbuf, sizeof(sbuf)))) return -1; close(fd); diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c index 87b8672eb413..f4654183d391 100644 --- a/tools/perf/util/color.c +++ b/tools/perf/util/color.c @@ -335,3 +335,19 @@ int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...) 
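The rewritten parse_callchain_report_opt() accepts its comma-separated options in any order: keyword tokens are matched first, the first bare number becomes the minimum percentage, and a second number the print limit. A simplified, runnable sketch of that dispatch, printing values instead of filling callchain_param (only two of the keywords are shown):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse(char *arg)
{
	char *tok, *endptr;
	bool minpcnt_set = false;

	while ((tok = strtok(arg, ",")) != NULL) {
		if (!strncmp(tok, "graph", strlen(tok)))
			printf("mode:      graph\n");
		else if (!strncmp(tok, "caller", strlen(tok)))
			printf("order:     caller\n");
		else if (!minpcnt_set) {
			double min = strtod(tok, &endptr);

			if (tok == endptr)
				return -1;	/* neither keyword nor number */
			minpcnt_set = true;
			printf("min pcnt:  %g\n", min);
		} else {
			printf("limit:     %lu\n", strtoul(tok, &endptr, 0));
		}
		arg = NULL;	/* strtok() keeps scanning the same string */
	}
	return 0;
}

int main(void)
{
	char opt[] = "graph,0.5,caller,100";

	return parse(opt) ? 1 : 0;
}
```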
va_end(args); return value_color_snprintf(bf, size, fmt, percent); } + +int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...) +{ + va_list args; + int len; + double percent; + const char *color; + + va_start(args, fmt); + len = va_arg(args, int); + percent = va_arg(args, double); + va_end(args); + + color = get_percent_color(percent); + return color_snprintf(bf, size, color, fmt, len, percent); +} diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index 7ff30a62a132..0a594b8a0c26 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h @@ -41,6 +41,7 @@ int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); int value_color_snprintf(char *bf, size_t size, const char *fmt, double value); int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...); +int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...); int percent_color_fprintf(FILE *fp, const char *fmt, double percent); const char *get_percent_color(double percent); diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index f9e777629e21..b2bb59df65e1 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -74,7 +74,7 @@ static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root) return new; } -struct comm *comm__new(const char *str, u64 timestamp) +struct comm *comm__new(const char *str, u64 timestamp, bool exec) { struct comm *comm = zalloc(sizeof(*comm)); @@ -82,6 +82,7 @@ struct comm *comm__new(const char *str, u64 timestamp) return NULL; comm->start = timestamp; + comm->exec = exec; comm->comm_str = comm_str__findnew(str, &comm_str_root); if (!comm->comm_str) { @@ -94,7 +95,7 @@ struct comm *comm__new(const char *str, u64 timestamp) return comm; } -int comm__override(struct comm *comm, const char *str, u64 timestamp) +int comm__override(struct comm *comm, const char *str, u64 timestamp, bool exec) { struct comm_str *new, *old = comm->comm_str; @@ -106,6 +107,8 @@ int comm__override(struct comm *comm, const char *str, u64 timestamp) comm_str__put(old); comm->comm_str = new; comm->start = timestamp; + if (exec) + comm->exec = true; return 0; } diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h index fac5bd51befc..51c10ab257f8 100644 --- a/tools/perf/util/comm.h +++ b/tools/perf/util/comm.h @@ -11,11 +11,13 @@ struct comm { struct comm_str *comm_str; u64 start; struct list_head list; + bool exec; }; void comm__free(struct comm *comm); -struct comm *comm__new(const char *str, u64 timestamp); +struct comm *comm__new(const char *str, u64 timestamp, bool exec); const char *comm__str(const struct comm *comm); -int comm__override(struct comm *comm, const char *str, u64 timestamp); +int comm__override(struct comm *comm, const char *str, u64 timestamp, + bool exec); #endif /* __PERF_COMM_H */ diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 1e5e2e5af6b1..9970b8b0190b 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -286,6 +286,21 @@ static int parse_unit_factor(const char *end, unsigned long *val) return 0; } +static int perf_parse_llong(const char *value, long long *ret) +{ + if (value && *value) { + char *end; + long long val = strtoll(value, &end, 0); + unsigned long factor = 1; + + if (!parse_unit_factor(end, &factor)) + return 0; + *ret = val * factor; + return 1; + } + return 0; +} + static int perf_parse_long(const char *value, long *ret) { if (value && *value) { @@ 
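percent_color_len_snprintf() here, like hpp_color_scnprintf() earlier, relies on a calling convention where the int width precedes the double value in the variadic argument list, matching the " %*.2f%%" format. A compilable sketch of that wrapper shape, with the color lookup elided:

```c
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

static int len_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	va_list args;
	int len;
	double percent;

	va_start(args, fmt);
	len = va_arg(args, int);	/* the width comes first... */
	percent = va_arg(args, double);	/* ...then the value */
	va_end(args);

	return snprintf(bf, size, fmt, len, percent);
}

int main(void)
{
	char buf[32];

	len_snprintf(buf, sizeof(buf), " %*.2f%%", 8, 12.34);
	puts(buf);	/* "    12.34%" */
	return 0;
}
```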
-307,6 +322,15 @@ static void die_bad_config(const char *name) die("bad config value for '%s'", name); } +u64 perf_config_u64(const char *name, const char *value) +{ + long long ret = 0; + + if (!perf_parse_llong(value, &ret)) + die_bad_config(name); + return (u64) ret; +} + int perf_config_int(const char *name, const char *value) { long ret = 0; diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index 29d720cf5844..1921942fc2e0 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -50,12 +50,14 @@ static int open_file_read(struct perf_data_file *file) { struct stat st; int fd; + char sbuf[STRERR_BUFSIZE]; fd = open(file->path, O_RDONLY); if (fd < 0) { int err = errno; - pr_err("failed to open %s: %s", file->path, strerror(err)); + pr_err("failed to open %s: %s", file->path, + strerror_r(err, sbuf, sizeof(sbuf))); if (err == ENOENT && !strcmp(file->path, "perf.data")) pr_err(" (try 'perf record' first)"); pr_err("\n"); @@ -88,6 +90,7 @@ static int open_file_read(struct perf_data_file *file) static int open_file_write(struct perf_data_file *file) { int fd; + char sbuf[STRERR_BUFSIZE]; if (check_backup(file)) return -1; @@ -95,7 +98,8 @@ static int open_file_write(struct perf_data_file *file) fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR); if (fd < 0) - pr_err("failed to open %s : %s\n", file->path, strerror(errno)); + pr_err("failed to open %s : %s\n", file->path, + strerror_r(errno, sbuf, sizeof(sbuf))); return fd; } diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 71d419362634..ba357f3226c6 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -13,8 +13,12 @@ #include "util.h" #include "target.h" +#define NSECS_PER_SEC 1000000000ULL +#define NSECS_PER_USEC 1000ULL + int verbose; bool dump_trace = false, quiet = false; +int debug_ordered_events; static int _eprintf(int level, int var, const char *fmt, va_list args) { @@ -42,6 +46,35 @@ int eprintf(int level, int var, const char *fmt, ...) return ret; } +static int __eprintf_time(u64 t, const char *fmt, va_list args) +{ + int ret = 0; + u64 secs, usecs, nsecs = t; + + secs = nsecs / NSECS_PER_SEC; + nsecs -= secs * NSECS_PER_SEC; + usecs = nsecs / NSECS_PER_USEC; + + ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ", + secs, usecs); + ret += vfprintf(stderr, fmt, args); + return ret; +} + +int eprintf_time(int level, int var, u64 t, const char *fmt, ...) +{ + int ret = 0; + va_list args; + + if (var >= level) { + va_start(args, fmt); + ret = __eprintf_time(t, fmt, args); + va_end(args); + } + + return ret; +} + /* * Overloading libtraceevent standard info print * function, display with -v in perf. @@ -110,7 +143,8 @@ static struct debug_variable { const char *name; int *ptr; } debug_variables[] = { - { .name = "verbose", .ptr = &verbose }, + { .name = "verbose", .ptr = &verbose }, + { .name = "ordered-events", .ptr = &debug_ordered_events}, { .name = NULL, } }; diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 89fb6b0f7ab2..be264d6f3b30 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -3,6 +3,7 @@ #define __PERF_DEBUG_H #include <stdbool.h> +#include <string.h> #include "event.h" #include "../ui/helpline.h" #include "../ui/progress.h" @@ -10,6 +11,7 @@ extern int verbose; extern bool quiet, dump_trace; +extern int debug_ordered_events; #ifndef pr_fmt #define pr_fmt(fmt) fmt @@ -29,6 +31,14 @@ extern bool quiet, dump_trace; #define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__) #define pr_debug4(fmt, ...) 
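The eprintf_time() machinery added to debug.c prefixes messages with a seconds.microseconds timestamp derived from a u64 nanosecond value. The arithmetic in isolation, using an assumed sample timestamp:

```c
#include <inttypes.h>
#include <stdio.h>

#define NSECS_PER_SEC  1000000000ULL
#define NSECS_PER_USEC 1000ULL

int main(void)
{
	uint64_t nsecs = 1234567891234ULL;	/* sample timestamp */
	uint64_t secs = nsecs / NSECS_PER_SEC;

	nsecs -= secs * NSECS_PER_SEC;
	printf("[%13" PRIu64 ".%06" PRIu64 "] flushing queue\n",
	       secs, nsecs / NSECS_PER_USEC);	/* [         1234.567891] */
	return 0;
}
```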
pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_time_N(n, var, t, fmt, ...) \ + eprintf_time(n, var, t, fmt, ##__VA_ARGS__) + +#define pr_oe_time(t, fmt, ...) pr_time_N(1, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_oe_time2(t, fmt, ...) pr_time_N(2, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__) + +#define STRERR_BUFSIZE 128 /* For the buffer size of strerror_r */ + int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(union perf_event *event); @@ -38,6 +48,7 @@ int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); void pr_stat(const char *fmt, ...); int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4))); +int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__((format(printf, 4, 5))); int perf_debug_option(const char *str); diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 90d02c661dd4..55e39dc1bcda 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -37,6 +37,7 @@ int dso__read_binary_type_filename(const struct dso *dso, { char build_id_hex[BUILD_ID_SIZE * 2 + 1]; int ret = 0; + size_t len; switch (type) { case DSO_BINARY_TYPE__DEBUGLINK: { @@ -60,26 +61,25 @@ int dso__read_binary_type_filename(const struct dso *dso, break; case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: - snprintf(filename, size, "%s/usr/lib/debug%s.debug", - symbol_conf.symfs, dso->long_name); + len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); + snprintf(filename + len, size - len, "%s.debug", dso->long_name); break; case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: - snprintf(filename, size, "%s/usr/lib/debug%s", - symbol_conf.symfs, dso->long_name); + len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); + snprintf(filename + len, size - len, "%s", dso->long_name); break; case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: { const char *last_slash; - size_t len; size_t dir_size; last_slash = dso->long_name + dso->long_name_len; while (last_slash != dso->long_name && *last_slash != '/') last_slash--; - len = scnprintf(filename, size, "%s", symbol_conf.symfs); + len = __symbol__join_symfs(filename, size, ""); dir_size = last_slash - dso->long_name + 2; if (dir_size > (size - len)) { ret = -1; @@ -100,26 +100,24 @@ int dso__read_binary_type_filename(const struct dso *dso, build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex); - snprintf(filename, size, - "%s/usr/lib/debug/.build-id/%.2s/%s.debug", - symbol_conf.symfs, build_id_hex, build_id_hex + 2); + len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/"); + snprintf(filename + len, size - len, "%.2s/%s.debug", + build_id_hex, build_id_hex + 2); break; case DSO_BINARY_TYPE__VMLINUX: case DSO_BINARY_TYPE__GUEST_VMLINUX: case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: - snprintf(filename, size, "%s%s", - symbol_conf.symfs, dso->long_name); + __symbol__join_symfs(filename, size, dso->long_name); break; case DSO_BINARY_TYPE__GUEST_KMODULE: - snprintf(filename, size, "%s%s%s", symbol_conf.symfs, - root_dir, dso->long_name); + path__join3(filename, size, symbol_conf.symfs, + root_dir, dso->long_name); break; case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: - snprintf(filename, size, "%s%s", symbol_conf.symfs, - dso->long_name); + __symbol__join_symfs(filename, size, dso->long_name); break; case DSO_BINARY_TYPE__KCORE: @@ -164,13 +162,15 @@ static void close_first_dso(void); static int do_open(char *name) { int fd; + char sbuf[STRERR_BUFSIZE]; do { fd = open(name, O_RDONLY); 
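A pattern repeated throughout this series: thread-unsafe strerror(errno) becomes strerror_r() into a caller-owned sbuf sized by the new STRERR_BUFSIZE. A minimal sketch, assuming the GNU strerror_r() variant that returns a string, which is what these call sites rely on:

```c
#define _GNU_SOURCE	/* GNU strerror_r(): returns char * */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define STRERR_BUFSIZE 128	/* buffer size the series standardizes on */

int main(void)
{
	char sbuf[STRERR_BUFSIZE];

	errno = ENOENT;
	fprintf(stderr, "failed to open perf.data: %s\n",
		strerror_r(errno, sbuf, sizeof(sbuf)));
	return 0;
}
```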
if (fd >= 0) return fd; - pr_debug("dso open failed, mmap: %s\n", strerror(errno)); + pr_debug("dso open failed, mmap: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); if (!dso__data_open_cnt || errno != EMFILE) break; @@ -532,10 +532,12 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size) static int data_file_size(struct dso *dso) { struct stat st; + char sbuf[STRERR_BUFSIZE]; if (!dso->data.file_size) { if (fstat(dso->data.fd, &st)) { - pr_err("dso mmap failed, fstat: %s\n", strerror(errno)); + pr_err("dso mmap failed, fstat: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); return -1; } dso->data.file_size = st.st_size; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1398c83d896d..ed558191c0b3 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -784,9 +784,9 @@ try_again: * "[vdso]" dso, but for now lets use the old trick of looking * in the whole kernel symbol list. */ - if ((long long)al->addr < 0 && - cpumode == PERF_RECORD_MISC_USER && - machine && mg != &machine->kmaps) { + if (cpumode == PERF_RECORD_MISC_USER && machine && + mg != &machine->kmaps && + machine__kernel_ip(machine, al->addr)) { mg = &machine->kmaps; load_map = true; goto try_again; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 94d6976180da..7eb7107731ec 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -156,6 +156,8 @@ struct perf_sample { u32 cpu; u32 raw_size; u64 data_src; + u32 flags; + u16 insn_len; void *raw_data; struct ip_callchain *callchain; struct branch_stack *branch_stack; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 814e954c1318..a3e28b49128a 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -122,6 +122,7 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) { list_add_tail(&entry->node, &evlist->entries); entry->idx = evlist->nr_entries; + entry->tracking = !entry->idx; if (!evlist->nr_entries++) perf_evlist__set_id_pos(evlist); @@ -265,17 +266,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist, return 0; } +static int perf_evlist__nr_threads(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + if (evsel->system_wide) + return 1; + else + return thread_map__nr(evlist->threads); +} + void perf_evlist__disable(struct perf_evlist *evlist) { int cpu, thread; struct perf_evsel *pos; int nr_cpus = cpu_map__nr(evlist->cpus); - int nr_threads = thread_map__nr(evlist->threads); + int nr_threads; for (cpu = 0; cpu < nr_cpus; cpu++) { evlist__for_each(evlist, pos) { if (!perf_evsel__is_group_leader(pos) || !pos->fd) continue; + nr_threads = perf_evlist__nr_threads(evlist, pos); for (thread = 0; thread < nr_threads; thread++) ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE, 0); @@ -288,12 +299,13 @@ void perf_evlist__enable(struct perf_evlist *evlist) int cpu, thread; struct perf_evsel *pos; int nr_cpus = cpu_map__nr(evlist->cpus); - int nr_threads = thread_map__nr(evlist->threads); + int nr_threads; for (cpu = 0; cpu < nr_cpus; cpu++) { evlist__for_each(evlist, pos) { if (!perf_evsel__is_group_leader(pos) || !pos->fd) continue; + nr_threads = perf_evlist__nr_threads(evlist, pos); for (thread = 0; thread < nr_threads; thread++) ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); @@ -305,12 +317,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist, struct perf_evsel *evsel) { int cpu, thread, err; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = perf_evlist__nr_threads(evlist, evsel); if 
(!evsel->fd) return 0; - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - for (thread = 0; thread < evlist->threads->nr; thread++) { + for (cpu = 0; cpu < nr_cpus; cpu++) { + for (thread = 0; thread < nr_threads; thread++) { err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_DISABLE, 0); if (err) @@ -324,12 +338,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist, struct perf_evsel *evsel) { int cpu, thread, err; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = perf_evlist__nr_threads(evlist, evsel); if (!evsel->fd) return -EINVAL; - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - for (thread = 0; thread < evlist->threads->nr; thread++) { + for (cpu = 0; cpu < nr_cpus; cpu++) { + for (thread = 0; thread < nr_threads; thread++) { err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); if (err) @@ -339,11 +355,67 @@ int perf_evlist__enable_event(struct perf_evlist *evlist, return 0; } +static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, + struct perf_evsel *evsel, int cpu) +{ + int thread, err; + int nr_threads = perf_evlist__nr_threads(evlist, evsel); + + if (!evsel->fd) + return -EINVAL; + + for (thread = 0; thread < nr_threads; thread++) { + err = ioctl(FD(evsel, cpu, thread), + PERF_EVENT_IOC_ENABLE, 0); + if (err) + return err; + } + return 0; +} + +static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, + struct perf_evsel *evsel, + int thread) +{ + int cpu, err; + int nr_cpus = cpu_map__nr(evlist->cpus); + + if (!evsel->fd) + return -EINVAL; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); + if (err) + return err; + } + return 0; +} + +int perf_evlist__enable_event_idx(struct perf_evlist *evlist, + struct perf_evsel *evsel, int idx) +{ + bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); + + if (per_cpu_mmaps) + return perf_evlist__enable_event_cpu(evlist, evsel, idx); + else + return perf_evlist__enable_event_thread(evlist, evsel, idx); +} + static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) { int nr_cpus = cpu_map__nr(evlist->cpus); int nr_threads = thread_map__nr(evlist->threads); - int nfds = nr_cpus * nr_threads * evlist->nr_entries; + int nfds = 0; + struct perf_evsel *evsel; + + list_for_each_entry(evsel, &evlist->entries, node) { + if (evsel->system_wide) + nfds += nr_cpus; + else + nfds += nr_cpus * nr_threads; + } + evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); return evlist->pollfd != NULL ? 0 : -ENOMEM; } @@ -636,7 +708,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, struct perf_evsel *evsel; evlist__for_each(evlist, evsel) { - int fd = FD(evsel, cpu, thread); + int fd; + + if (evsel->system_wide && thread) + continue; + + fd = FD(evsel, cpu, thread); if (*output == -1) { *output = fd; @@ -1061,6 +1138,8 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar } if (!evlist->workload.pid) { + int ret; + if (pipe_output) dup2(2, 1); @@ -1078,8 +1157,22 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar /* * Wait until the parent tells us to go. */ - if (read(go_pipe[0], &bf, 1) == -1) - perror("unable to read pipe"); + ret = read(go_pipe[0], &bf, 1); + /* + * The parent will ask for the execvp() to be performed by + * writing exactly one byte, in workload.cork_fd, usually via + * perf_evlist__start_workload(). 
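perf_evlist__alloc_pollfd() above moves from a flat cpus * threads * entries count to a per-evsel sum, because a system-wide event owns one fd per cpu regardless of the thread map. The counting rule as a standalone sketch; the struct is a stand-in for perf's own:

```c
#include <stdbool.h>
#include <stdio.h>

struct evsel { bool system_wide; };	/* stand-in for perf's struct */

static int nr_pollfds(const struct evsel *evsel, int nr, int cpus, int threads)
{
	int i, nfds = 0;

	for (i = 0; i < nr; i++)
		nfds += evsel[i].system_wide ? cpus : cpus * threads;
	return nfds;
}

int main(void)
{
	struct evsel e[] = { { .system_wide = false }, { .system_wide = true } };

	/* 4 cpus, 3 threads: 4 * 3 + 4 = 16 pollfds */
	printf("%d\n", nr_pollfds(e, 2, 4, 3));
	return 0;
}
```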
+	 *
+	 * For cancelling the workload without actually running it,
+	 * the parent will just close workload.cork_fd, without writing
+	 * anything, i.e. read will return zero and we just exit()
+	 * here.
+	 */
+	if (ret != 1) {
+		if (ret == -1)
+			perror("unable to read pipe");
+		exit(ret);
+	}

 	execvp(argv[0], (char **)argv);

@@ -1202,7 +1295,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
 			       int err, char *buf, size_t size)
 {
 	int printed, value;
-	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

 	switch (err) {
 	case EACCES:
@@ -1250,3 +1343,19 @@ void perf_evlist__to_front(struct perf_evlist *evlist,

 	list_splice(&move, &evlist->entries);
 }
+
+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+				     struct perf_evsel *tracking_evsel)
+{
+	struct perf_evsel *evsel;
+
+	if (tracking_evsel->tracking)
+		return;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel != tracking_evsel)
+			evsel->tracking = false;
+	}
+
+	tracking_evsel->tracking = true;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index f5173cd63693..106de53a6a74 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -122,6 +122,8 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel);
 int perf_evlist__enable_event(struct perf_evlist *evlist,
 			      struct perf_evsel *evsel);
+int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
+				  struct perf_evsel *evsel, int idx);

 void perf_evlist__set_selected(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel);
@@ -262,4 +264,7 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 #define evlist__for_each_safe(evlist, tmp, evsel) \
 	__evlist__for_each_safe(&(evlist)->entries, tmp, evsel)

+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+				     struct perf_evsel *tracking_evsel);
+
 #endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 21a373ebea22..b38de5819323 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -162,6 +162,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
 		      struct perf_event_attr *attr, int idx)
 {
 	evsel->idx = idx;
+	evsel->tracking = !idx;
 	evsel->attr = *attr;
 	evsel->leader = evsel;
 	evsel->unit = "";
@@ -561,7 +562,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 {
 	struct perf_evsel *leader = evsel->leader;
 	struct perf_event_attr *attr = &evsel->attr;
-	int track = !evsel->idx; /* only the first counter needs these */
+	int track = evsel->tracking;
 	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

 	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
@@ -633,9 +634,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 	if (opts->period)
 		perf_evsel__set_sample_bit(evsel, PERIOD);

-	if (!perf_missing_features.sample_id_all &&
-	    (opts->sample_time || !opts->no_inherit ||
-	     target__has_cpu(&opts->target) || per_cpu))
+	/*
+	 * When the user explicitly disabled time, don't force it here.
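The comment just above describes a small start/cancel protocol over the cork pipe: one byte means go, EOF means the workload was cancelled. A runnable distillation of it, execing /bin/true as a placeholder workload:

```c
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int go_pipe[2];
	char bf = 0;

	if (pipe(go_pipe) < 0)
		return 1;

	if (fork() == 0) {		/* the workload child */
		ssize_t ret;

		close(go_pipe[1]);
		ret = read(go_pipe[0], &bf, 1);
		if (ret != 1)		/* EOF: cancelled; -1: error */
			_exit(ret == -1 ? 1 : 0);
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	close(go_pipe[0]);
	if (write(go_pipe[1], &bf, 1) != 1)	/* one byte uncorks it */
		return 1;
	close(go_pipe[1]);
	wait(NULL);
	return 0;
}
```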
+ */ + if (opts->sample_time && + (!perf_missing_features.sample_id_all && + (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu))) perf_evsel__set_sample_bit(evsel, TIME); if (opts->raw_samples && !evsel->no_aux_samples) { @@ -692,6 +696,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { int cpu, thread; + + if (evsel->system_wide) + nthreads = 1; + evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); if (evsel->fd) { @@ -710,6 +718,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea { int cpu, thread; + if (evsel->system_wide) + nthreads = 1; + for (cpu = 0; cpu < ncpus; cpu++) { for (thread = 0; thread < nthreads; thread++) { int fd = FD(evsel, cpu, thread), @@ -740,6 +751,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) { + if (evsel->system_wide) + nthreads = 1; + evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); if (evsel->sample_id == NULL) return -ENOMEM; @@ -784,6 +798,9 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { int cpu, thread; + if (evsel->system_wide) + nthreads = 1; + for (cpu = 0; cpu < ncpus; cpu++) for (thread = 0; thread < nthreads; ++thread) { close(FD(evsel, cpu, thread)); @@ -872,6 +889,9 @@ int __perf_evsel__read(struct perf_evsel *evsel, int cpu, thread; struct perf_counts_values *aggr = &evsel->counts->aggr, count; + if (evsel->system_wide) + nthreads = 1; + aggr->val = aggr->ena = aggr->run = 0; for (cpu = 0; cpu < ncpus; cpu++) { @@ -994,13 +1014,18 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp) static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, struct thread_map *threads) { - int cpu, thread; + int cpu, thread, nthreads; unsigned long flags = PERF_FLAG_FD_CLOEXEC; int pid = -1, err; enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; + if (evsel->system_wide) + nthreads = 1; + else + nthreads = threads->nr; + if (evsel->fd == NULL && - perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) + perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0) return -ENOMEM; if (evsel->cgrp) { @@ -1024,10 +1049,10 @@ retry_sample_id: for (cpu = 0; cpu < cpus->nr; cpu++) { - for (thread = 0; thread < threads->nr; thread++) { + for (thread = 0; thread < nthreads; thread++) { int group_fd; - if (!evsel->cgrp) + if (!evsel->cgrp && !evsel->system_wide) pid = threads->map[thread]; group_fd = get_group_fd(evsel, cpu, thread); @@ -1100,7 +1125,7 @@ out_close: close(FD(evsel, cpu, thread)); FD(evsel, cpu, thread) = -1; } - thread = threads->nr; + thread = nthreads; } while (--cpu >= 0); return err; } @@ -2002,6 +2027,8 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err, int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, int err, char *msg, size_t size) { + char sbuf[STRERR_BUFSIZE]; + switch (err) { case EPERM: case EACCES: @@ -2036,13 +2063,20 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, "No APIC? 
If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); #endif break; + case EBUSY: + if (find_process("oprofiled")) + return scnprintf(msg, size, + "The PMU counters are busy/taken by another profiler.\n" + "We found oprofile daemon running, please stop it and try again."); + break; default: break; } return scnprintf(msg, size, - "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n" + "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" "/bin/dmesg may provide additional information.\n" "No CONFIG_PERF_EVENTS=y kernel support configured?\n", - err, strerror(err), perf_evsel__name(evsel)); + err, strerror_r(err, sbuf, sizeof(sbuf)), + perf_evsel__name(evsel)); } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index d7f93ce0ebc1..7bc314be6a7b 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -85,6 +85,8 @@ struct perf_evsel { bool needs_swap; bool no_aux_samples; bool immediate; + bool system_wide; + bool tracking; /* parse modifier helper */ int exclude_GH; int nr_members; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 30df6187ee02..86569fa3651d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -277,6 +277,28 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) } } +void hists__delete_entries(struct hists *hists) +{ + struct rb_node *next = rb_first(&hists->entries); + struct hist_entry *n; + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hists->entries); + + if (sort__need_collapse) + rb_erase(&n->rb_node_in, &hists->entries_collapsed); + + --hists->nr_entries; + if (!n->filtered) + --hists->nr_non_filtered_entries; + + hist_entry__free(n); + } +} + /* * histogram, sorted on item, collects periods */ diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 742f49a85725..8c9c70e18cbb 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -152,6 +152,7 @@ void hists__output_resort(struct hists *hists); void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel); +void hists__delete_entries(struct hists *hists); void hists__output_recalc_col_len(struct hists *hists, int max_rows); u64 hists__total_period(struct hists *hists); @@ -192,6 +193,7 @@ struct perf_hpp { }; struct perf_hpp_fmt { + const char *name; int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct perf_evsel *evsel); int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, @@ -207,6 +209,8 @@ struct perf_hpp_fmt { struct list_head list; struct list_head sort_list; bool elide; + int len; + int user_len; }; extern struct list_head perf_hpp__list; @@ -261,17 +265,19 @@ static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format) } void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists); +void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists); +void perf_hpp__set_user_width(const char *width_list_str); typedef u64 (*hpp_field_fn)(struct hist_entry *he); typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front); typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...); -int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, - hpp_field_fn get_field, const char *fmt, - hpp_snprint_fn print_fn, bool fmt_percent); -int __hpp__fmt_acc(struct perf_hpp *hpp, struct 
hist_entry *he, - hpp_field_fn get_field, const char *fmt, - hpp_snprint_fn print_fn, bool fmt_percent); +int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he, hpp_field_fn get_field, + const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent); +int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he, hpp_field_fn get_field, + const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent); static inline void advance_hpp(struct perf_hpp *hpp, int inc) { diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 16bba9fff2c8..b2ec38bf211e 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -31,6 +31,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) machine->symbol_filter = NULL; machine->id_hdr_size = 0; + machine->comm_exec = false; + machine->kernel_start = 0; machine->root_dir = strdup(root_dir); if (machine->root_dir == NULL) @@ -179,6 +181,19 @@ void machines__set_symbol_filter(struct machines *machines, } } +void machines__set_comm_exec(struct machines *machines, bool comm_exec) +{ + struct rb_node *nd; + + machines->host.comm_exec = comm_exec; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *machine = rb_entry(nd, struct machine, rb_node); + + machine->comm_exec = comm_exec; + } +} + struct machine *machines__find(struct machines *machines, pid_t pid) { struct rb_node **p = &machines->guests.rb_node; @@ -398,17 +413,31 @@ struct thread *machine__find_thread(struct machine *machine, pid_t pid, return __machine__findnew_thread(machine, pid, tid, false); } +struct comm *machine__thread_exec_comm(struct machine *machine, + struct thread *thread) +{ + if (machine->comm_exec) + return thread__exec_comm(thread); + else + return thread__comm(thread); +} + int machine__process_comm_event(struct machine *machine, union perf_event *event, struct perf_sample *sample) { struct thread *thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid); + bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; + + if (exec) + machine->comm_exec = true; if (dump_trace) perf_event__fprintf_comm(event, stdout); - if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) { + if (thread == NULL || + __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); return -1; } @@ -565,8 +594,8 @@ const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; * Returns the name of the start symbol in *symbol_name. Pass in NULL as * symbol_name if it's not that important. 
*/ -static u64 machine__get_kernel_start_addr(struct machine *machine, - const char **symbol_name) +static u64 machine__get_running_kernel_start(struct machine *machine, + const char **symbol_name) { char filename[PATH_MAX]; int i; @@ -593,7 +622,7 @@ static u64 machine__get_kernel_start_addr(struct machine *machine, int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) { enum map_type type; - u64 start = machine__get_kernel_start_addr(machine, NULL); + u64 start = machine__get_running_kernel_start(machine, NULL); for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; @@ -912,7 +941,7 @@ int machine__create_kernel_maps(struct machine *machine) { struct dso *kernel = machine__get_kernel(machine); const char *name; - u64 addr = machine__get_kernel_start_addr(machine, &name); + u64 addr = machine__get_running_kernel_start(machine, &name); if (!addr) return -1; @@ -1285,6 +1314,16 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread, thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, &al); + if (al.map == NULL) { + /* + * some shared data regions have execute bit set which puts + * their mapping in the MAP__FUNCTION type array. + * Check there as a fallback option before dropping the sample. + */ + thread__find_addr_location(thread, machine, m, MAP__FUNCTION, addr, + &al); + } + ams->addr = addr; ams->al_addr = al.addr; ams->sym = al.sym; @@ -1531,3 +1570,25 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, return 0; } + +int machine__get_kernel_start(struct machine *machine) +{ + struct map *map = machine__kernel_map(machine, MAP__FUNCTION); + int err = 0; + + /* + * The only addresses above 2^63 are kernel addresses of a 64-bit + * kernel. Note that addresses are unsigned so that on a 32-bit system + * all addresses including kernel addresses are less than 2^32. In + * that case (32-bit system), if the kernel mapping is unknown, all + * addresses will be assumed to be in user space - see + * machine__kernel_ip(). 
+ */ + machine->kernel_start = 1ULL << 63; + if (map) { + err = map__load(map, machine->symbol_filter); + if (map->start) + machine->kernel_start = map->start; + } + return err; +} diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index b972824e6294..6a6bcc1cff54 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -26,6 +26,7 @@ struct machine { struct rb_node rb_node; pid_t pid; u16 id_hdr_size; + bool comm_exec; char *root_dir; struct rb_root threads; struct list_head dead_threads; @@ -35,6 +36,7 @@ struct machine { struct list_head kernel_dsos; struct map_groups kmaps; struct map *vmlinux_maps[MAP__NR_TYPES]; + u64 kernel_start; symbol_filter_t symbol_filter; pid_t *current_tid; }; @@ -45,8 +47,26 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type) return machine->vmlinux_maps[type]; } +int machine__get_kernel_start(struct machine *machine); + +static inline u64 machine__kernel_start(struct machine *machine) +{ + if (!machine->kernel_start) + machine__get_kernel_start(machine); + return machine->kernel_start; +} + +static inline bool machine__kernel_ip(struct machine *machine, u64 ip) +{ + u64 kernel_start = machine__kernel_start(machine); + + return ip >= kernel_start; +} + struct thread *machine__find_thread(struct machine *machine, pid_t pid, pid_t tid); +struct comm *machine__thread_exec_comm(struct machine *machine, + struct thread *thread); int machine__process_comm_event(struct machine *machine, union perf_event *event, struct perf_sample *sample); @@ -88,6 +108,7 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size); void machines__set_symbol_filter(struct machines *machines, symbol_filter_t symbol_filter); +void machines__set_comm_exec(struct machines *machines, bool comm_exec); struct machine *machine__new_host(void); int machine__init(struct machine *machine, const char *root_dir, pid_t pid); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 31b8905dd863..b7090596ac50 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -31,6 +31,7 @@ static inline int is_anon_memory(const char *filename) static inline int is_no_dso_memory(const char *filename) { return !strncmp(filename, "[stack", 6) || + !strncmp(filename, "/SYSV",5) || !strcmp(filename, "[heap]"); } diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c new file mode 100644 index 000000000000..706ce1a66169 --- /dev/null +++ b/tools/perf/util/ordered-events.c @@ -0,0 +1,245 @@ +#include <linux/list.h> +#include <linux/compiler.h> +#include "ordered-events.h" +#include "evlist.h" +#include "session.h" +#include "asm/bug.h" +#include "debug.h" + +#define pr_N(n, fmt, ...) \ + eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__) + +#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) + +static void queue_event(struct ordered_events *oe, struct ordered_event *new) +{ + struct ordered_event *last = oe->last; + u64 timestamp = new->timestamp; + struct list_head *p; + + ++oe->nr_events; + oe->last = new; + + pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events); + + if (!last) { + list_add(&new->list, &oe->events); + oe->max_timestamp = timestamp; + return; + } + + /* + * last event might point to some random place in the list as it's + * the last queued event. We expect that the new event is close to + * this. 
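machine__get_kernel_start() encodes the fallback its comment describes: when the kernel map cannot be loaded, kernel_start defaults to 2^63, so machine__kernel_ip() degrades gracefully on both 32-bit and 64-bit systems. The heuristic in isolation:

```c
#include <stdint.h>
#include <stdio.h>

/* Default when no kernel map is known: only the top half of the
 * 64-bit address space is treated as kernel. */
static uint64_t kernel_start = 1ULL << 63;

static int kernel_ip(uint64_t ip)
{
	return ip >= kernel_start;
}

int main(void)
{
	printf("%d\n", kernel_ip(0x00007f2a10401000ULL));	/* 0: user */
	printf("%d\n", kernel_ip(0xffffffff81000000ULL));	/* 1: kernel */
	return 0;
}
```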
+ */ + if (last->timestamp <= timestamp) { + while (last->timestamp <= timestamp) { + p = last->list.next; + if (p == &oe->events) { + list_add_tail(&new->list, &oe->events); + oe->max_timestamp = timestamp; + return; + } + last = list_entry(p, struct ordered_event, list); + } + list_add_tail(&new->list, &last->list); + } else { + while (last->timestamp > timestamp) { + p = last->list.prev; + if (p == &oe->events) { + list_add(&new->list, &oe->events); + return; + } + last = list_entry(p, struct ordered_event, list); + } + list_add(&new->list, &last->list); + } +} + +#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event)) +static struct ordered_event *alloc_event(struct ordered_events *oe) +{ + struct list_head *cache = &oe->cache; + struct ordered_event *new = NULL; + + if (!list_empty(cache)) { + new = list_entry(cache->next, struct ordered_event, list); + list_del(&new->list); + } else if (oe->buffer) { + new = oe->buffer + oe->buffer_idx; + if (++oe->buffer_idx == MAX_SAMPLE_BUFFER) + oe->buffer = NULL; + } else if (oe->cur_alloc_size < oe->max_alloc_size) { + size_t size = MAX_SAMPLE_BUFFER * sizeof(*new); + + oe->buffer = malloc(size); + if (!oe->buffer) + return NULL; + + pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n", + oe->cur_alloc_size, size, oe->max_alloc_size); + + oe->cur_alloc_size += size; + list_add(&oe->buffer->list, &oe->to_free); + + /* First entry is abused to maintain the to_free list. */ + oe->buffer_idx = 2; + new = oe->buffer + 1; + } else { + pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size); + } + + return new; +} + +struct ordered_event * +ordered_events__new(struct ordered_events *oe, u64 timestamp) +{ + struct ordered_event *new; + + new = alloc_event(oe); + if (new) { + new->timestamp = timestamp; + queue_event(oe, new); + } + + return new; +} + +void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) +{ + list_move(&event->list, &oe->cache); + oe->nr_events--; +} + +static int __ordered_events__flush(struct perf_session *s, + struct perf_tool *tool) +{ + struct ordered_events *oe = &s->ordered_events; + struct list_head *head = &oe->events; + struct ordered_event *tmp, *iter; + struct perf_sample sample; + u64 limit = oe->next_flush; + u64 last_ts = oe->last ? 
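queue_event() above keeps the pending queue sorted by starting each insertion at the most recently queued node and walking forward or backward as the new timestamp dictates, exploiting the temporal locality of incoming events. A self-contained sketch, with plain prev/next pointers standing in for the kernel-style list_head:

```c
#include <inttypes.h>
#include <stdio.h>

struct ev {
	uint64_t ts;
	struct ev *prev, *next;
};

static struct ev *head, *last;

static void queue(struct ev *new)
{
	struct ev *pos = last;

	last = new;
	new->prev = new->next = NULL;

	if (!pos) {				/* first event */
		head = new;
		return;
	}
	if (pos->ts <= new->ts) {		/* walk forward */
		while (pos->next && pos->next->ts <= new->ts)
			pos = pos->next;
		new->next = pos->next;
		new->prev = pos;
		if (pos->next)
			pos->next->prev = new;
		pos->next = new;
	} else {				/* walk backward */
		while (pos->prev && pos->prev->ts > new->ts)
			pos = pos->prev;
		new->prev = pos->prev;
		new->next = pos;
		if (pos->prev)
			pos->prev->next = new;
		else
			head = new;
		pos->prev = new;
	}
}

int main(void)
{
	struct ev e[] = { { .ts = 10 }, { .ts = 30 }, { .ts = 20 }, { .ts = 25 } };
	struct ev *p;
	int i;

	for (i = 0; i < 4; i++)
		queue(&e[i]);
	for (p = head; p; p = p->next)
		printf("%" PRIu64 " ", p->ts);
	printf("\n");	/* 10 20 25 30 */
	return 0;
}
```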
oe->last->timestamp : 0ULL; + bool show_progress = limit == ULLONG_MAX; + struct ui_progress prog; + int ret; + + if (!tool->ordered_events || !limit) + return 0; + + if (show_progress) + ui_progress__init(&prog, oe->nr_events, "Processing time ordered events..."); + + list_for_each_entry_safe(iter, tmp, head, list) { + if (session_done()) + return 0; + + if (iter->timestamp > limit) + break; + + ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); + if (ret) + pr_err("Can't parse sample, err = %d\n", ret); + else { + ret = perf_session__deliver_event(s, iter->event, &sample, tool, + iter->file_offset); + if (ret) + return ret; + } + + ordered_events__delete(oe, iter); + oe->last_flush = iter->timestamp; + + if (show_progress) + ui_progress__update(&prog, 1); + } + + if (list_empty(head)) + oe->last = NULL; + else if (last_ts <= limit) + oe->last = list_entry(head->prev, struct ordered_event, list); + + return 0; +} + +int ordered_events__flush(struct perf_session *s, struct perf_tool *tool, + enum oe_flush how) +{ + struct ordered_events *oe = &s->ordered_events; + static const char * const str[] = { + "NONE", + "FINAL", + "ROUND", + "HALF ", + }; + int err; + + switch (how) { + case OE_FLUSH__FINAL: + oe->next_flush = ULLONG_MAX; + break; + + case OE_FLUSH__HALF: + { + struct ordered_event *first, *last; + struct list_head *head = &oe->events; + + first = list_entry(head->next, struct ordered_event, list); + last = oe->last; + + /* Warn if we are called before any event got allocated. */ + if (WARN_ONCE(!last || list_empty(head), "empty queue")) + return 0; + + oe->next_flush = first->timestamp; + oe->next_flush += (last->timestamp - first->timestamp) / 2; + break; + } + + case OE_FLUSH__ROUND: + case OE_FLUSH__NONE: + default: + break; + }; + + pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n", + str[how], oe->nr_events); + pr_oe_time(oe->max_timestamp, "max_timestamp\n"); + + err = __ordered_events__flush(s, tool); + + if (!err) { + if (how == OE_FLUSH__ROUND) + oe->next_flush = oe->max_timestamp; + + oe->last_flush_type = how; + } + + pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n", + str[how], oe->nr_events); + pr_oe_time(oe->last_flush, "last_flush\n"); + + return err; +} + +void ordered_events__init(struct ordered_events *oe) +{ + INIT_LIST_HEAD(&oe->events); + INIT_LIST_HEAD(&oe->cache); + INIT_LIST_HEAD(&oe->to_free); + oe->max_alloc_size = (u64) -1; + oe->cur_alloc_size = 0; +} + +void ordered_events__free(struct ordered_events *oe) +{ + while (!list_empty(&oe->to_free)) { + struct ordered_event *event; + + event = list_entry(oe->to_free.next, struct ordered_event, list); + list_del(&event->list); + free(event); + } +} diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h new file mode 100644 index 000000000000..3b2f20542a01 --- /dev/null +++ b/tools/perf/util/ordered-events.h @@ -0,0 +1,51 @@ +#ifndef __ORDERED_EVENTS_H +#define __ORDERED_EVENTS_H + +#include <linux/types.h> +#include "tool.h" + +struct perf_session; + +struct ordered_event { + u64 timestamp; + u64 file_offset; + union perf_event *event; + struct list_head list; +}; + +enum oe_flush { + OE_FLUSH__NONE, + OE_FLUSH__FINAL, + OE_FLUSH__ROUND, + OE_FLUSH__HALF, +}; + +struct ordered_events { + u64 last_flush; + u64 next_flush; + u64 max_timestamp; + u64 max_alloc_size; + u64 cur_alloc_size; + struct list_head events; + struct list_head cache; + struct list_head to_free; + struct ordered_event 
*buffer; + struct ordered_event *last; + int buffer_idx; + unsigned int nr_events; + enum oe_flush last_flush_type; +}; + +struct ordered_event *ordered_events__new(struct ordered_events *oe, u64 timestamp); +void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event); +int ordered_events__flush(struct perf_session *s, struct perf_tool *tool, + enum oe_flush how); +void ordered_events__init(struct ordered_events *oe); +void ordered_events__free(struct ordered_events *oe); + +static inline +void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size) +{ + oe->max_alloc_size = size; +} +#endif /* __ORDERED_EVENTS_H */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 1e15df10a88c..e34c81a0bcf3 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -10,6 +10,7 @@ #include "symbol.h" #include "cache.h" #include "header.h" +#include "debug.h" #include <api/fs/debugfs.h> #include "parse-events-bison.h" #define YY_EXTRA_TYPE int @@ -1006,9 +1007,11 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; + char sbuf[STRERR_BUFSIZE]; if (debugfs_valid_mountpoint(tracing_events_path)) { - printf(" [ Tracepoints not available: %s ]\n", strerror(errno)); + printf(" [ Tracepoints not available: %s ]\n", + strerror_r(errno, sbuf, sizeof(sbuf))); return; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 7a811eb61f75..9bf582750561 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -14,8 +14,8 @@ struct perf_pmu_alias { char *name; - struct list_head terms; - struct list_head list; + struct list_head terms; /* HEAD struct parse_events_term -> list */ + struct list_head list; /* ELEM */ char unit[UNIT_MAX_LEN+1]; double scale; }; diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index c14a543ce1f3..1c1e2eecbe1f 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -17,9 +17,9 @@ struct perf_pmu { char *name; __u32 type; struct cpu_map *cpus; - struct list_head format; - struct list_head aliases; - struct list_head list; + struct list_head format; /* HEAD struct perf_pmu_format -> list */ + struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */ + struct list_head list; /* ELEM */ }; struct perf_pmu *perf_pmu__find(const char *name); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 9a0a1839a377..f73595fc0627 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -79,7 +79,7 @@ static int init_symbol_maps(bool user_only) int ret; symbol_conf.sort_by_name = true; - ret = symbol__init(); + ret = symbol__init(NULL); if (ret < 0) { pr_debug("Failed to init symbol map.\n"); goto out; @@ -258,21 +258,33 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) #ifdef HAVE_DWARF_SUPPORT /* Open new debuginfo of given module */ -static struct debuginfo *open_debuginfo(const char *module) +static struct debuginfo *open_debuginfo(const char *module, bool silent) { const char *path = module; + struct debuginfo *ret; if (!module || !strchr(module, '/')) { path = kernel_get_module_path(module); if (!path) { - pr_err("Failed to find path of %s module.\n", - module ?: "kernel"); + if (!silent) + pr_err("Failed to find path of %s module.\n", + module ?: "kernel"); return NULL; } } - return debuginfo__new(path); + ret = debuginfo__new(path); + if 
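
The strerror(errno) to strerror_r() conversions that begin here, and recur through probe-event.c, probe-finder.c, run-command.c and util.c below, all follow one idiom: every call site owns a small stack buffer, so concurrent callers cannot trample a shared static string. A minimal sketch, assuming glibc's GNU strerror_r() (which returns the message pointer); the 128-byte size merely stands in for perf's STRERR_BUFSIZE:

#define _GNU_SOURCE             /* GNU strerror_r(): returns the message pointer */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ERRBUF_SIZE 128         /* stand-in for perf's STRERR_BUFSIZE */

/* Thread-safe error reporting: each call site owns its buffer. */
static void warn_errno(const char *what)
{
        char sbuf[ERRBUF_SIZE];

        fprintf(stderr, "%s: %s\n", what,
                strerror_r(errno, sbuf, sizeof(sbuf)));
}
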
(!ret && !silent) { + pr_warning("The %s file has no debug information.\n", path); + if (!module || !strtailcmp(path, ".ko")) + pr_warning("Rebuild with CONFIG_DEBUG_INFO=y, "); + else + pr_warning("Rebuild with -g, "); + pr_warning("or install an appropriate debuginfo package.\n"); + } + return ret; } + static int get_text_start_address(const char *exec, unsigned long *address) { Elf *elf; @@ -333,15 +345,13 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, pr_debug("try to find information at %" PRIx64 " in %s\n", addr, tp->module ? : "kernel"); - dinfo = open_debuginfo(tp->module); + dinfo = open_debuginfo(tp->module, verbose == 0); if (dinfo) { ret = debuginfo__find_probe_point(dinfo, (unsigned long)addr, pp); debuginfo__delete(dinfo); - } else { - pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", addr); + } else ret = -ENOENT; - } if (ret > 0) { pp->retprobe = tp->retprobe; @@ -457,13 +467,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct debuginfo *dinfo; int ntevs, ret = 0; - dinfo = open_debuginfo(target); + dinfo = open_debuginfo(target, !need_dwarf); if (!dinfo) { - if (need_dwarf) { - pr_warning("Failed to open debuginfo file.\n"); + if (need_dwarf) return -ENOENT; - } pr_debug("Could not open debuginfo. Try to use symbols.\n"); return 0; } @@ -565,7 +573,7 @@ static int get_real_path(const char *raw_path, const char *comp_dir, static int __show_one_line(FILE *fp, int l, bool skip, bool show_num) { - char buf[LINEBUF_SIZE]; + char buf[LINEBUF_SIZE], sbuf[STRERR_BUFSIZE]; const char *color = show_num ? "" : PERF_COLOR_BLUE; const char *prefix = NULL; @@ -585,7 +593,8 @@ static int __show_one_line(FILE *fp, int l, bool skip, bool show_num) return 1; error: if (ferror(fp)) { - pr_warning("File read error: %s\n", strerror(errno)); + pr_warning("File read error: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); return -1; } return 0; @@ -618,13 +627,12 @@ static int __show_line_range(struct line_range *lr, const char *module) FILE *fp; int ret; char *tmp; + char sbuf[STRERR_BUFSIZE]; /* Search a line range */ - dinfo = open_debuginfo(module); - if (!dinfo) { - pr_warning("Failed to open debuginfo file.\n"); + dinfo = open_debuginfo(module, false); + if (!dinfo) return -ENOENT; - } ret = debuginfo__find_line_range(dinfo, lr); debuginfo__delete(dinfo); @@ -656,7 +664,7 @@ static int __show_line_range(struct line_range *lr, const char *module) fp = fopen(lr->path, "r"); if (fp == NULL) { pr_warning("Failed to open %s: %s\n", lr->path, - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); return -errno; } /* Skip to starting line number */ @@ -772,9 +780,8 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, if (ret < 0) return ret; - dinfo = open_debuginfo(module); + dinfo = open_debuginfo(module, false); if (!dinfo) { - pr_warning("Failed to open debuginfo file.\n"); ret = -ENOENT; goto out; } @@ -1405,8 +1412,7 @@ int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len) return tmp - buf; error: - pr_debug("Failed to synthesize perf probe argument: %s\n", - strerror(-ret)); + pr_debug("Failed to synthesize perf probe argument: %d\n", ret); return ret; } @@ -1455,8 +1461,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp) return buf; error: - pr_debug("Failed to synthesize perf probe point: %s\n", - strerror(-ret)); + pr_debug("Failed to synthesize perf probe point: %d\n", ret); free(buf); return NULL; } @@ -1780,10 +1785,11 @@ static void 
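
The 'silent' parameter added to open_debuginfo() is one instance of a pattern running through this series: low-level helpers stop printing and instead return a negative errno, and the outermost caller decides whether the user should see a warning. A hedged sketch of that shape, with hypothetical names rather than the perf functions:

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Low level: encode the failure, never print. */
static int open_resource(const char *path)
{
        int fd = open(path, O_RDONLY);

        return fd < 0 ? -errno : fd;
}

/* High level: the caller decides whether the user hears about it. */
static int load_resource(const char *path, bool silent)
{
        int fd = open_resource(path);

        if (fd < 0) {
                if (!silent)
                        fprintf(stderr, "cannot open %s (err %d)\n", path, fd);
                return fd;
        }
        /* ... consume fd ... */
        close(fd);
        return 0;
}
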
clear_probe_trace_event(struct probe_trace_event *tev) memset(tev, 0, sizeof(*tev)); } -static void print_warn_msg(const char *file, bool is_kprobe) +static void print_open_warning(int err, bool is_kprobe) { + char sbuf[STRERR_BUFSIZE]; - if (errno == ENOENT) { + if (err == -ENOENT) { const char *config; if (!is_kprobe) @@ -1791,25 +1797,43 @@ static void print_warn_msg(const char *file, bool is_kprobe) else config = "CONFIG_KPROBE_EVENTS"; - pr_warning("%s file does not exist - please rebuild kernel" - " with %s.\n", file, config); - } else - pr_warning("Failed to open %s file: %s\n", file, - strerror(errno)); + pr_warning("%cprobe_events file does not exist" + " - please rebuild kernel with %s.\n", + is_kprobe ? 'k' : 'u', config); + } else if (err == -ENOTSUP) + pr_warning("Debugfs is not mounted.\n"); + else + pr_warning("Failed to open %cprobe_events: %s\n", + is_kprobe ? 'k' : 'u', + strerror_r(-err, sbuf, sizeof(sbuf))); } -static int open_probe_events(const char *trace_file, bool readwrite, - bool is_kprobe) +static void print_both_open_warning(int kerr, int uerr) +{ + /* Both kprobes and uprobes are disabled, warn it. */ + if (kerr == -ENOTSUP && uerr == -ENOTSUP) + pr_warning("Debugfs is not mounted.\n"); + else if (kerr == -ENOENT && uerr == -ENOENT) + pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS " + "or/and CONFIG_UPROBE_EVENTS.\n"); + else { + char sbuf[STRERR_BUFSIZE]; + pr_warning("Failed to open kprobe events: %s.\n", + strerror_r(-kerr, sbuf, sizeof(sbuf))); + pr_warning("Failed to open uprobe events: %s.\n", + strerror_r(-uerr, sbuf, sizeof(sbuf))); + } +} + +static int open_probe_events(const char *trace_file, bool readwrite) { char buf[PATH_MAX]; const char *__debugfs; int ret; __debugfs = debugfs_find_mountpoint(); - if (__debugfs == NULL) { - pr_warning("Debugfs is not mounted.\n"); - return -ENOENT; - } + if (__debugfs == NULL) + return -ENOTSUP; ret = e_snprintf(buf, PATH_MAX, "%s/%s", __debugfs, trace_file); if (ret >= 0) { @@ -1820,19 +1844,19 @@ static int open_probe_events(const char *trace_file, bool readwrite, ret = open(buf, O_RDONLY, 0); if (ret < 0) - print_warn_msg(buf, is_kprobe); + ret = -errno; } return ret; } static int open_kprobe_events(bool readwrite) { - return open_probe_events("tracing/kprobe_events", readwrite, true); + return open_probe_events("tracing/kprobe_events", readwrite); } static int open_uprobe_events(bool readwrite) { - return open_probe_events("tracing/uprobe_events", readwrite, false); + return open_probe_events("tracing/uprobe_events", readwrite); } /* Get raw string list of current kprobe_events or uprobe_events */ @@ -1857,7 +1881,7 @@ static struct strlist *get_probe_trace_command_rawlist(int fd) p[idx] = '\0'; ret = strlist__add(sl, buf); if (ret < 0) { - pr_debug("strlist__add failed: %s\n", strerror(-ret)); + pr_debug("strlist__add failed (%d)\n", ret); strlist__delete(sl); return NULL; } @@ -1916,7 +1940,7 @@ static int __show_perf_probe_events(int fd, bool is_kprobe) rawlist = get_probe_trace_command_rawlist(fd); if (!rawlist) - return -ENOENT; + return -ENOMEM; strlist__for_each(ent, rawlist) { ret = parse_probe_trace_command(ent->s, &tev); @@ -1940,27 +1964,34 @@ static int __show_perf_probe_events(int fd, bool is_kprobe) /* List up current perf-probe events */ int show_perf_probe_events(void) { - int fd, ret; + int kp_fd, up_fd, ret; setup_pager(); - fd = open_kprobe_events(false); - - if (fd < 0) - return fd; ret = init_symbol_maps(false); if (ret < 0) return ret; - ret = __show_perf_probe_events(fd, 
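
print_open_warning() and print_both_open_warning() above key their messages on the encoded return values (-ENOENT, -ENOTSUP) rather than on whatever errno holds at print time, and the 'both' variant collapses a pair of failures into one actionable diagnostic. A simplified, self-contained rendering of that dispatch (wording illustrative):

#include <errno.h>
#include <stdio.h>

/* Collapse two independent open() failures into one diagnostic. */
static void report_both(int kerr, int uerr)
{
        if (kerr == -ENOTSUP && uerr == -ENOTSUP)
                fprintf(stderr, "debugfs is not mounted\n");
        else if (kerr == -ENOENT && uerr == -ENOENT)
                fprintf(stderr, "rebuild the kernel with CONFIG_KPROBE_EVENTS and/or CONFIG_UPROBE_EVENTS\n");
        else
                fprintf(stderr, "open failed: kprobe events %d, uprobe events %d\n",
                        kerr, uerr);
}
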
true); - close(fd); + kp_fd = open_kprobe_events(false); + if (kp_fd >= 0) { + ret = __show_perf_probe_events(kp_fd, true); + close(kp_fd); + if (ret < 0) + goto out; + } - fd = open_uprobe_events(false); - if (fd >= 0) { - ret = __show_perf_probe_events(fd, false); - close(fd); + up_fd = open_uprobe_events(false); + if (kp_fd < 0 && up_fd < 0) { + print_both_open_warning(kp_fd, up_fd); + ret = kp_fd; + goto out; } + if (up_fd >= 0) { + ret = __show_perf_probe_events(up_fd, false); + close(up_fd); + } +out: exit_symbol_maps(); return ret; } @@ -1976,6 +2007,8 @@ static struct strlist *get_probe_trace_event_names(int fd, bool include_group) memset(&tev, 0, sizeof(tev)); rawlist = get_probe_trace_command_rawlist(fd); + if (!rawlist) + return NULL; sl = strlist__new(true, NULL); strlist__for_each(ent, rawlist) { ret = parse_probe_trace_command(ent->s, &tev); @@ -2005,6 +2038,7 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev) { int ret = 0; char *buf = synthesize_probe_trace_command(tev); + char sbuf[STRERR_BUFSIZE]; if (!buf) { pr_debug("Failed to synthesize probe trace event.\n"); @@ -2016,7 +2050,7 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev) ret = write(fd, buf, strlen(buf)); if (ret <= 0) pr_warning("Failed to write event: %s\n", - strerror(errno)); + strerror_r(errno, sbuf, sizeof(sbuf))); } free(buf); return ret; @@ -2030,7 +2064,7 @@ static int get_new_event_name(char *buf, size_t len, const char *base, /* Try no suffix */ ret = e_snprintf(buf, len, "%s", base); if (ret < 0) { - pr_debug("snprintf() failed: %s\n", strerror(-ret)); + pr_debug("snprintf() failed: %d\n", ret); return ret; } if (!strlist__has_entry(namelist, buf)) @@ -2046,7 +2080,7 @@ static int get_new_event_name(char *buf, size_t len, const char *base, for (i = 1; i < MAX_EVENT_INDEX; i++) { ret = e_snprintf(buf, len, "%s_%d", base, i); if (ret < 0) { - pr_debug("snprintf() failed: %s\n", strerror(-ret)); + pr_debug("snprintf() failed: %d\n", ret); return ret; } if (!strlist__has_entry(namelist, buf)) @@ -2075,8 +2109,11 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, else fd = open_kprobe_events(true); - if (fd < 0) + if (fd < 0) { + print_open_warning(fd, !pev->uprobes); return fd; + } + /* Get current event names */ namelist = get_probe_trace_event_names(fd, false); if (!namelist) { @@ -2408,7 +2445,8 @@ static int __del_trace_probe_event(int fd, struct str_node *ent) printf("Removed event: %s\n", ent->s); return 0; error: - pr_warning("Failed to delete event: %s\n", strerror(-ret)); + pr_warning("Failed to delete event: %s\n", + strerror_r(-ret, buf, sizeof(buf))); return ret; } @@ -2449,15 +2487,18 @@ int del_perf_probe_events(struct strlist *dellist) /* Get current event names */ kfd = open_kprobe_events(true); - if (kfd < 0) - return kfd; + if (kfd >= 0) + namelist = get_probe_trace_event_names(kfd, true); - namelist = get_probe_trace_event_names(kfd, true); ufd = open_uprobe_events(true); - if (ufd >= 0) unamelist = get_probe_trace_event_names(ufd, true); + if (kfd < 0 && ufd < 0) { + print_both_open_warning(kfd, ufd); + goto error; + } + if (namelist == NULL && unamelist == NULL) goto error; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index dca9145d704c..9c593561aa71 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -281,6 +281,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, struct probe_trace_arg_ref **ref_ptr = &tvar->ref; Dwarf_Die type; char buf[16]; + 
char sbuf[STRERR_BUFSIZE]; int bsize, boffs, total; int ret; @@ -367,7 +368,7 @@ formatted: if (ret >= 16) ret = -E2BIG; pr_warning("Failed to convert variable type: %s\n", - strerror(-ret)); + strerror_r(-ret, sbuf, sizeof(sbuf))); return ret; } tvar->type = strdup(buf); @@ -779,10 +780,12 @@ static int find_lazy_match_lines(struct intlist *list, size_t line_len; ssize_t len; int count = 0, linenum = 1; + char sbuf[STRERR_BUFSIZE]; fp = fopen(fname, "r"); if (!fp) { - pr_warning("Failed to open %s: %s\n", fname, strerror(errno)); + pr_warning("Failed to open %s: %s\n", fname, + strerror_r(errno, sbuf, sizeof(sbuf))); return -errno; } diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index fe8079edbdc1..cf69325b985f 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -14,6 +14,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) struct perf_evsel *evsel; unsigned long flags = perf_event_open_cloexec_flag(); int err = -EAGAIN, fd; + static pid_t pid = -1; evlist = perf_evlist__new(); if (!evlist) @@ -24,14 +25,22 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) evsel = perf_evlist__first(evlist); - fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags); - if (fd < 0) - goto out_delete; + while (1) { + fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags); + if (fd < 0) { + if (pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + goto out_delete; + } + break; + } close(fd); fn(evsel); - fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags); + fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags); if (fd < 0) { if (errno == EINVAL) err = -EINVAL; @@ -47,7 +56,7 @@ out_delete: static bool perf_probe_api(setup_probe_fn_t fn) { - const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL}; + const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL}; struct cpu_map *cpus; int cpu, ret, i = 0; @@ -106,7 +115,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts) evlist__for_each(evlist, evsel) { perf_evsel__config(evsel, opts); - if (!evsel->idx && use_comm_exec) + if (evsel->tracking && use_comm_exec) evsel->attr.comm_exec = 1; } @@ -201,6 +210,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) struct perf_evsel *evsel; int err, fd, cpu; bool ret = false; + pid_t pid = -1; temp_evlist = perf_evlist__new(); if (!temp_evlist) @@ -221,12 +231,20 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) cpu = evlist->cpus->map[0]; } - fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, - perf_event_open_cloexec_flag()); - if (fd >= 0) { - close(fd); - ret = true; + while (1) { + fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, + perf_event_open_cloexec_flag()); + if (fd < 0) { + if (pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + goto out_delete; + } + break; } + close(fd); + ret = true; out_delete: perf_evlist__delete(temp_evlist); diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c index da8e9b285f51..34622b53e733 100644 --- a/tools/perf/util/run-command.c +++ b/tools/perf/util/run-command.c @@ -1,6 +1,7 @@ #include "cache.h" #include "run-command.h" #include "exec_cmd.h" +#include "debug.h" static inline void close_pair(int fd[2]) { @@ -19,6 +20,7 @@ int start_command(struct child_process *cmd) { int need_in, need_out, need_err; int fdin[2], fdout[2], fderr[2]; + char sbuf[STRERR_BUFSIZE]; /* * In case of errors we must keep the 
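
The retry loops added to perf_do_probe_api() and perf_evlist__can_select_event() above deal with perf_event_paranoid: a system-wide open (pid == -1) fails with EACCES for unprivileged users, so the same attribute is retried for the current task only. A standalone sketch of the fallback using the raw syscall (the real code goes through sys_perf_event_open() with a close-on-exec flag):

#include <errno.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/perf_event.h>

/*
 * Open a counter on one cpu, preferring a system-wide fd (pid == -1).
 * Unprivileged users get EACCES under perf_event_paranoid, so retry
 * once, restricted to the current task.
 */
static int probe_open(struct perf_event_attr *attr, int cpu)
{
        pid_t pid = -1;
        int fd;

        while (1) {
                fd = syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0UL);
                if (fd >= 0 || pid == 0 || errno != EACCES)
                        return fd;
                pid = 0;        /* fall back to "this task on this cpu" */
        }
}

This is also why the probe events above gain the :u modifier: the per-task fallback can still be denied kernel-level sampling.
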
promise to close FDs @@ -99,7 +101,7 @@ int start_command(struct child_process *cmd) if (cmd->dir && chdir(cmd->dir)) die("exec %s: cd to %s failed (%s)", cmd->argv[0], - cmd->dir, strerror(errno)); + cmd->dir, strerror_r(errno, sbuf, sizeof(sbuf))); if (cmd->env) { for (; *cmd->env; cmd->env++) { if (strchr(*cmd->env, '=')) @@ -153,6 +155,8 @@ int start_command(struct child_process *cmd) static int wait_or_whine(pid_t pid) { + char sbuf[STRERR_BUFSIZE]; + for (;;) { int status, code; pid_t waiting = waitpid(pid, &status, 0); @@ -160,7 +164,8 @@ static int wait_or_whine(pid_t pid) if (waiting < 0) { if (errno == EINTR) continue; - error("waitpid failed (%s)", strerror(errno)); + error("waitpid failed (%s)", + strerror_r(errno, sbuf, sizeof(sbuf))); return -ERR_RUN_COMMAND_WAITPID; } if (waiting != pid) diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index b2dba9c0a3a1..0a01bac4ce02 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -432,6 +432,11 @@ error: return err; } +static int perl_flush_script(void) +{ + return 0; +} + /* * Stop trace script */ @@ -633,6 +638,7 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile) struct scripting_ops perl_scripting_ops = { .name = "Perl", .start_script = perl_start_script, + .flush_script = perl_flush_script, .stop_script = perl_stop_script, .process_event = perl_process_event, .generate_script = perl_generate_script, diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index cbce2545da45..56ba07cce549 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -73,6 +73,35 @@ static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObj Py_DECREF(val); } +static PyObject *get_handler(const char *handler_name) +{ + PyObject *handler; + + handler = PyDict_GetItemString(main_dict, handler_name); + if (handler && !PyCallable_Check(handler)) + return NULL; + return handler; +} + +static void call_object(PyObject *handler, PyObject *args, const char *die_msg) +{ + PyObject *retval; + + retval = PyObject_CallObject(handler, args); + if (retval == NULL) + handler_call_die(die_msg); + Py_DECREF(retval); +} + +static void try_call_object(const char *handler_name, PyObject *args) +{ + PyObject *handler; + + handler = get_handler(handler_name); + if (handler) + call_object(handler, args, handler_name); +} + static void define_value(enum print_arg_type field_type, const char *ev_name, const char *field_name, @@ -80,7 +109,7 @@ static void define_value(enum print_arg_type field_type, const char *field_str) { const char *handler_name = "define_flag_value"; - PyObject *handler, *t, *retval; + PyObject *t; unsigned long long value; unsigned n = 0; @@ -98,13 +127,7 @@ static void define_value(enum print_arg_type field_type, PyTuple_SetItem(t, n++, PyInt_FromLong(value)); PyTuple_SetItem(t, n++, PyString_FromString(field_str)); - handler = PyDict_GetItemString(main_dict, handler_name); - if (handler && PyCallable_Check(handler)) { - retval = PyObject_CallObject(handler, t); - if (retval == NULL) - handler_call_die(handler_name); - Py_DECREF(retval); - } + try_call_object(handler_name, t); Py_DECREF(t); } @@ -127,7 +150,7 @@ static void define_field(enum print_arg_type field_type, const char *delim) { const char *handler_name = 
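
get_handler(), call_object() and try_call_object() above fold trace-event-python.c's repeated lookup, callable check, call and die-on-exception sequence into one place. The same idiom expressed directly against the Python C API (PyErr_Print() stands in here for perf's handler_call_die()):

#include <Python.h>

/* Look up a global from the script; NULL when absent or not callable. */
static PyObject *get_handler(PyObject *main_dict, const char *name)
{
        PyObject *handler = PyDict_GetItemString(main_dict, name);      /* borrowed */

        return (handler && PyCallable_Check(handler)) ? handler : NULL;
}

/* Call the handler if the script defined one; its absence is not an error. */
static void try_call(PyObject *main_dict, const char *name, PyObject *args)
{
        PyObject *handler = get_handler(main_dict, name);
        PyObject *retval;

        if (!handler)
                return;
        retval = PyObject_CallObject(handler, args);
        if (!retval)
                PyErr_Print();          /* perf instead dies via handler_call_die() */
        Py_XDECREF(retval);
}
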
"define_flag_field"; - PyObject *handler, *t, *retval; + PyObject *t; unsigned n = 0; if (field_type == PRINT_SYMBOL) @@ -145,13 +168,7 @@ static void define_field(enum print_arg_type field_type, if (field_type == PRINT_FLAGS) PyTuple_SetItem(t, n++, PyString_FromString(delim)); - handler = PyDict_GetItemString(main_dict, handler_name); - if (handler && PyCallable_Check(handler)) { - retval = PyObject_CallObject(handler, t); - if (retval == NULL) - handler_call_die(handler_name); - Py_DECREF(retval); - } + try_call_object(handler_name, t); Py_DECREF(t); } @@ -362,7 +379,7 @@ static void python_process_tracepoint(struct perf_sample *sample, struct thread *thread, struct addr_location *al) { - PyObject *handler, *retval, *context, *t, *obj, *callchain; + PyObject *handler, *context, *t, *obj, *callchain; PyObject *dict = NULL; static char handler_name[256]; struct format_field *field; @@ -387,9 +404,7 @@ static void python_process_tracepoint(struct perf_sample *sample, sprintf(handler_name, "%s__%s", event->system, event->name); - handler = PyDict_GetItemString(main_dict, handler_name); - if (handler && !PyCallable_Check(handler)) - handler = NULL; + handler = get_handler(handler_name); if (!handler) { dict = PyDict_New(); if (!dict) @@ -450,19 +465,9 @@ static void python_process_tracepoint(struct perf_sample *sample, Py_FatalError("error resizing Python tuple"); if (handler) { - retval = PyObject_CallObject(handler, t); - if (retval == NULL) - handler_call_die(handler_name); - Py_DECREF(retval); + call_object(handler, t, handler_name); } else { - handler = PyDict_GetItemString(main_dict, "trace_unhandled"); - if (handler && PyCallable_Check(handler)) { - - retval = PyObject_CallObject(handler, t); - if (retval == NULL) - handler_call_die("trace_unhandled"); - Py_DECREF(retval); - } + try_call_object("trace_unhandled", t); Py_DECREF(dict); } @@ -474,7 +479,7 @@ static void python_process_general_event(struct perf_sample *sample, struct thread *thread, struct addr_location *al) { - PyObject *handler, *retval, *t, *dict, *callchain, *dict_sample; + PyObject *handler, *t, *dict, *callchain, *dict_sample; static char handler_name[64]; unsigned n = 0; @@ -496,8 +501,8 @@ static void python_process_general_event(struct perf_sample *sample, snprintf(handler_name, sizeof(handler_name), "%s", "process_event"); - handler = PyDict_GetItemString(main_dict, handler_name); - if (!handler || !PyCallable_Check(handler)) + handler = get_handler(handler_name); + if (!handler) goto exit; pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); @@ -539,10 +544,7 @@ static void python_process_general_event(struct perf_sample *sample, if (_PyTuple_Resize(&t, n) == -1) Py_FatalError("error resizing Python tuple"); - retval = PyObject_CallObject(handler, t); - if (retval == NULL) - handler_call_die(handler_name); - Py_DECREF(retval); + call_object(handler, t, handler_name); exit: Py_DECREF(dict); Py_DECREF(t); @@ -566,36 +568,24 @@ static void python_process_event(union perf_event *event __maybe_unused, static int run_start_sub(void) { - PyObject *handler, *retval; - int err = 0; - main_module = PyImport_AddModule("__main__"); if (main_module == NULL) return -1; Py_INCREF(main_module); main_dict = PyModule_GetDict(main_module); - if (main_dict == NULL) { - err = -1; + if (main_dict == NULL) goto error; - } Py_INCREF(main_dict); - handler = PyDict_GetItemString(main_dict, "trace_begin"); - if (handler == NULL || !PyCallable_Check(handler)) - goto out; + 
try_call_object("trace_begin", NULL); - retval = PyObject_CallObject(handler, NULL); - if (retval == NULL) - handler_call_die("trace_begin"); + return 0; - Py_DECREF(retval); - return err; error: Py_XDECREF(main_dict); Py_XDECREF(main_module); -out: - return err; + return -1; } /* @@ -649,28 +639,23 @@ error: return err; } +static int python_flush_script(void) +{ + return 0; +} + /* * Stop trace script */ static int python_stop_script(void) { - PyObject *handler, *retval; - int err = 0; + try_call_object("trace_end", NULL); - handler = PyDict_GetItemString(main_dict, "trace_end"); - if (handler == NULL || !PyCallable_Check(handler)) - goto out; - - retval = PyObject_CallObject(handler, NULL); - if (retval == NULL) - handler_call_die("trace_end"); - Py_DECREF(retval); -out: Py_XDECREF(main_dict); Py_XDECREF(main_module); Py_Finalize(); - return err; + return 0; } static int python_generate_script(struct pevent *pevent, const char *outfile) @@ -843,6 +828,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) struct scripting_ops python_scripting_ops = { .name = "Python", .start_script = python_start_script, + .flush_script = python_flush_script, .stop_script = python_stop_script, .process_event = python_process_event, .generate_script = python_generate_script, diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 88dfef70c13d..6d2d50dea1d8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -14,6 +14,7 @@ #include "util.h" #include "cpumap.h" #include "perf_regs.h" +#include "asm/bug.h" static int perf_session__open(struct perf_session *session) { @@ -66,6 +67,25 @@ static void perf_session__destroy_kernel_maps(struct perf_session *session) machines__destroy_kernel_maps(&session->machines); } +static bool perf_session__has_comm_exec(struct perf_session *session) +{ + struct perf_evsel *evsel; + + evlist__for_each(session->evlist, evsel) { + if (evsel->attr.comm_exec) + return true; + } + + return false; +} + +static void perf_session__set_comm_exec(struct perf_session *session) +{ + bool comm_exec = perf_session__has_comm_exec(session); + + machines__set_comm_exec(&session->machines, comm_exec); +} + struct perf_session *perf_session__new(struct perf_data_file *file, bool repipe, struct perf_tool *tool) { @@ -75,9 +95,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file, goto out; session->repipe = repipe; - INIT_LIST_HEAD(&session->ordered_samples.samples); - INIT_LIST_HEAD(&session->ordered_samples.sample_cache); - INIT_LIST_HEAD(&session->ordered_samples.to_free); + ordered_events__init(&session->ordered_events); machines__init(&session->machines); if (file) { @@ -91,6 +109,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file, goto out_close; perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); } } @@ -104,9 +123,9 @@ struct perf_session *perf_session__new(struct perf_data_file *file, } if (tool && tool->ordering_requires_timestamps && - tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) { + tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); - tool->ordered_samples = false; + tool->ordered_events = false; } return session; @@ -238,7 +257,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool) if (tool->build_id == NULL) tool->build_id = process_finished_round_stub; if (tool->finished_round == NULL) { - if 
(tool->ordered_samples) + if (tool->ordered_events) tool->finished_round = process_finished_round; else tool->finished_round = process_finished_round_stub; @@ -444,87 +463,6 @@ static perf_event__swap_op perf_event__swap_ops[] = { [PERF_RECORD_HEADER_MAX] = NULL, }; -struct sample_queue { - u64 timestamp; - u64 file_offset; - union perf_event *event; - struct list_head list; -}; - -static void perf_session_free_sample_buffers(struct perf_session *session) -{ - struct ordered_samples *os = &session->ordered_samples; - - while (!list_empty(&os->to_free)) { - struct sample_queue *sq; - - sq = list_entry(os->to_free.next, struct sample_queue, list); - list_del(&sq->list); - free(sq); - } -} - -static int perf_session_deliver_event(struct perf_session *session, - union perf_event *event, - struct perf_sample *sample, - struct perf_tool *tool, - u64 file_offset); - -static int flush_sample_queue(struct perf_session *s, - struct perf_tool *tool) -{ - struct ordered_samples *os = &s->ordered_samples; - struct list_head *head = &os->samples; - struct sample_queue *tmp, *iter; - struct perf_sample sample; - u64 limit = os->next_flush; - u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; - bool show_progress = limit == ULLONG_MAX; - struct ui_progress prog; - int ret; - - if (!tool->ordered_samples || !limit) - return 0; - - if (show_progress) - ui_progress__init(&prog, os->nr_samples, "Processing time ordered events..."); - - list_for_each_entry_safe(iter, tmp, head, list) { - if (session_done()) - return 0; - - if (iter->timestamp > limit) - break; - - ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); - if (ret) - pr_err("Can't parse sample, err = %d\n", ret); - else { - ret = perf_session_deliver_event(s, iter->event, &sample, tool, - iter->file_offset); - if (ret) - return ret; - } - - os->last_flush = iter->timestamp; - list_del(&iter->list); - list_add(&iter->list, &os->sample_cache); - os->nr_samples--; - - if (show_progress) - ui_progress__update(&prog, 1); - } - - if (list_empty(head)) { - os->last_sample = NULL; - } else if (last_ts <= limit) { - os->last_sample = - list_entry(head->prev, struct sample_queue, list); - } - - return 0; -} - /* * When perf record finishes a pass on every buffers, it records this pseudo * event. @@ -568,99 +506,43 @@ static int process_finished_round(struct perf_tool *tool, union perf_event *event __maybe_unused, struct perf_session *session) { - int ret = flush_sample_queue(session, tool); - if (!ret) - session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; - - return ret; + return ordered_events__flush(session, tool, OE_FLUSH__ROUND); } -/* The queue is ordered by time */ -static void __queue_event(struct sample_queue *new, struct perf_session *s) -{ - struct ordered_samples *os = &s->ordered_samples; - struct sample_queue *sample = os->last_sample; - u64 timestamp = new->timestamp; - struct list_head *p; - - ++os->nr_samples; - os->last_sample = new; - - if (!sample) { - list_add(&new->list, &os->samples); - os->max_timestamp = timestamp; - return; - } - - /* - * last_sample might point to some random place in the list as it's - * the last queued event. We expect that the new event is close to - * this. 
- */ - if (sample->timestamp <= timestamp) { - while (sample->timestamp <= timestamp) { - p = sample->list.next; - if (p == &os->samples) { - list_add_tail(&new->list, &os->samples); - os->max_timestamp = timestamp; - return; - } - sample = list_entry(p, struct sample_queue, list); - } - list_add_tail(&new->list, &sample->list); - } else { - while (sample->timestamp > timestamp) { - p = sample->list.prev; - if (p == &os->samples) { - list_add(&new->list, &os->samples); - return; - } - sample = list_entry(p, struct sample_queue, list); - } - list_add(&new->list, &sample->list); - } -} - -#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) - int perf_session_queue_event(struct perf_session *s, union perf_event *event, - struct perf_sample *sample, u64 file_offset) + struct perf_tool *tool, struct perf_sample *sample, + u64 file_offset) { - struct ordered_samples *os = &s->ordered_samples; - struct list_head *sc = &os->sample_cache; + struct ordered_events *oe = &s->ordered_events; u64 timestamp = sample->time; - struct sample_queue *new; + struct ordered_event *new; if (!timestamp || timestamp == ~0ULL) return -ETIME; - if (timestamp < s->ordered_samples.last_flush) { - printf("Warning: Timestamp below last timeslice flush\n"); - return -EINVAL; + if (timestamp < oe->last_flush) { + WARN_ONCE(1, "Timestamp below last timeslice flush\n"); + + pr_oe_time(timestamp, "out of order event"); + pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n", + oe->last_flush_type); + + /* We could get out of order messages after forced flush. */ + if (oe->last_flush_type != OE_FLUSH__HALF) + return -EINVAL; } - if (!list_empty(sc)) { - new = list_entry(sc->next, struct sample_queue, list); - list_del(&new->list); - } else if (os->sample_buffer) { - new = os->sample_buffer + os->sample_buffer_idx; - if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) - os->sample_buffer = NULL; - } else { - os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); - if (!os->sample_buffer) - return -ENOMEM; - list_add(&os->sample_buffer->list, &os->to_free); - os->sample_buffer_idx = 2; - new = os->sample_buffer + 1; + new = ordered_events__new(oe, timestamp); + if (!new) { + ordered_events__flush(s, tool, OE_FLUSH__HALF); + new = ordered_events__new(oe, timestamp); } - new->timestamp = timestamp; + if (!new) + return -ENOMEM; + new->file_offset = file_offset; new->event = event; - - __queue_event(new, s); - return 0; } @@ -920,11 +802,10 @@ perf_session__deliver_sample(struct perf_session *session, &sample->read.one, machine); } -static int perf_session_deliver_event(struct perf_session *session, - union perf_event *event, - struct perf_sample *sample, - struct perf_tool *tool, - u64 file_offset) +int perf_session__deliver_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool, u64 file_offset) { struct perf_evsel *evsel; struct machine *machine; @@ -1005,8 +886,10 @@ static s64 perf_session__process_user_event(struct perf_session *session, switch (event->header.type) { case PERF_RECORD_HEADER_ATTR: err = tool->attr(tool, event, &session->evlist); - if (err == 0) + if (err == 0) { perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); + } return err; case PERF_RECORD_HEADER_EVENT_TYPE: /* @@ -1036,6 +919,61 @@ static void event_swap(union perf_event *event, bool sample_id_all) swap(event, sample_id_all); } +int perf_session__peek_event(struct perf_session *session, off_t file_offset, + void *buf, size_t buf_sz, + 
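
perf_session_queue_event() above pairs the bounded allocator with OE_FLUSH__HALF as backpressure: when ordered_events__new() returns NULL because max_alloc_size is exhausted, half of the queue is force-flushed to recycle nodes and the allocation is retried exactly once; only the second failure is treated as ENOMEM. That is also why a timestamp below last_flush is now tolerated, with a WARN_ONCE, right after a HALF flush. The retry shape, reduced to a toy pool:

#include <stddef.h>

#define POOL_CAP 4

struct pool {
        int used;
        int slot[POOL_CAP];
};

static int *pool_alloc(struct pool *p)
{
        return p->used < POOL_CAP ? &p->slot[p->used++] : NULL;
}

/* Stand-in for ordered_events__flush(..., OE_FLUSH__HALF). */
static void pool_flush_half(struct pool *p)
{
        p->used /= 2;   /* pretend the older half was delivered and recycled */
}

/* Allocate; on exhaustion flush half the queue and retry exactly once. */
static int *pool_alloc_or_make_room(struct pool *p)
{
        int *n = pool_alloc(p);

        if (!n) {
                pool_flush_half(p);
                n = pool_alloc(p);      /* NULL here is a genuine ENOMEM */
        }
        return n;
}
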
union perf_event **event_ptr, + struct perf_sample *sample) +{ + union perf_event *event; + size_t hdr_sz, rest; + int fd; + + if (session->one_mmap && !session->header.needs_swap) { + event = file_offset - session->one_mmap_offset + + session->one_mmap_addr; + goto out_parse_sample; + } + + if (perf_data_file__is_pipe(session->file)) + return -1; + + fd = perf_data_file__fd(session->file); + hdr_sz = sizeof(struct perf_event_header); + + if (buf_sz < hdr_sz) + return -1; + + if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || + readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) + return -1; + + event = (union perf_event *)buf; + + if (session->header.needs_swap) + perf_event_header__bswap(&event->header); + + if (event->header.size < hdr_sz || event->header.size > buf_sz) + return -1; + + rest = event->header.size - hdr_sz; + + if (readn(fd, buf + hdr_sz, rest) != (ssize_t)rest) + return -1; + + if (session->header.needs_swap) + event_swap(event, perf_evlist__sample_id_all(session->evlist)); + +out_parse_sample: + + if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && + perf_evlist__parse_sample(session->evlist, event, sample)) + return -1; + + *event_ptr = event; + + return 0; +} + static s64 perf_session__process_event(struct perf_session *session, union perf_event *event, struct perf_tool *tool, @@ -1062,15 +1000,15 @@ static s64 perf_session__process_event(struct perf_session *session, if (ret) return ret; - if (tool->ordered_samples) { - ret = perf_session_queue_event(session, event, &sample, + if (tool->ordered_events) { + ret = perf_session_queue_event(session, event, tool, &sample, file_offset); if (ret != -ETIME) return ret; } - return perf_session_deliver_event(session, event, &sample, tool, - file_offset); + return perf_session__deliver_event(session, event, &sample, tool, + file_offset); } void perf_event_header__bswap(struct perf_event_header *hdr) @@ -1222,12 +1160,11 @@ more: goto more; done: /* do the final flush for ordered samples */ - session->ordered_samples.next_flush = ULLONG_MAX; - err = flush_sample_queue(session, tool); + err = ordered_events__flush(session, tool, OE_FLUSH__FINAL); out_err: free(buf); perf_session__warn_about_errors(session, tool); - perf_session_free_sample_buffers(session); + ordered_events__free(&session->ordered_events); return err; } @@ -1368,12 +1305,11 @@ more: out: /* do the final flush for ordered samples */ - session->ordered_samples.next_flush = ULLONG_MAX; - err = flush_sample_queue(session, tool); + err = ordered_events__flush(session, tool, OE_FLUSH__FINAL); out_err: ui_progress__finish(); perf_session__warn_about_errors(session, tool); - perf_session_free_sample_buffers(session); + ordered_events__free(&session->ordered_events); session->one_mmap = false; return err; } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 0321013bd9fd..8dd41cad2d59 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -9,26 +9,13 @@ #include "symbol.h" #include "thread.h" #include "data.h" +#include "ordered-events.h" #include <linux/rbtree.h> #include <linux/perf_event.h> -struct sample_queue; struct ip_callchain; struct thread; -struct ordered_samples { - u64 last_flush; - u64 next_flush; - u64 max_timestamp; - struct list_head samples; - struct list_head sample_cache; - struct list_head to_free; - struct sample_queue *sample_buffer; - struct sample_queue *last_sample; - int sample_buffer_idx; - unsigned int nr_samples; -}; - struct perf_session { struct perf_header header; struct machines machines; @@ -39,7 +26,7 @@
bool one_mmap; void *one_mmap_addr; u64 one_mmap_offset; - struct ordered_samples ordered_samples; + struct ordered_events ordered_events; struct perf_data_file *file; }; @@ -58,6 +45,11 @@ void perf_session__delete(struct perf_session *session); void perf_event_header__bswap(struct perf_event_header *hdr); +int perf_session__peek_event(struct perf_session *session, off_t file_offset, + void *buf, size_t buf_sz, + union perf_event **event_ptr, + struct perf_sample *sample); + int __perf_session__process_events(struct perf_session *session, u64 data_offset, u64 data_size, u64 size, struct perf_tool *tool); @@ -65,10 +57,16 @@ int perf_session__process_events(struct perf_session *session, struct perf_tool *tool); int perf_session_queue_event(struct perf_session *s, union perf_event *event, - struct perf_sample *sample, u64 file_offset); + struct perf_tool *tool, struct perf_sample *sample, + u64 file_offset); void perf_tool__fill_defaults(struct perf_tool *tool); +int perf_session__deliver_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool, u64 file_offset); + int perf_session__resolve_callchain(struct perf_session *session, struct perf_evsel *evsel, struct thread *thread, diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 14e5a039bc45..1958637cf136 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -70,12 +70,14 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { const char *comm = thread__comm_str(he->thread); - return repsep_snprintf(bf, size, "%*s:%5d", width - 6, - comm ?: "", he->thread->tid); + + width = max(7U, width) - 6; + return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid, + width, width, comm ?: ""); } struct sort_entry sort_thread = { - .se_header = "Command: Pid", + .se_header = " Pid:Command", .se_cmp = sort__thread_cmp, .se_snprintf = hist_entry__thread_snprintf, .se_width_idx = HISTC_THREAD, @@ -106,7 +108,7 @@ sort__comm_sort(struct hist_entry *left, struct hist_entry *right) static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%*s", width, comm__str(he->comm)); + return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); } struct sort_entry sort_comm = { @@ -152,10 +154,10 @@ static int _hist_entry__dso_snprintf(struct map *map, char *bf, if (map && map->dso) { const char *dso_name = !verbose ? 
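
The sort-column changes starting here all make the same correction: a printf field width only pads, so an over-long string silently widened its column, while adding a precision ("%-*.*s") both pads and truncates. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        const char *comm = "a_rather_long_command";
        int width = 8;

        printf("[%-*s]|\n", width, comm);          /* width pads but never truncates */
        printf("[%-*.*s]|\n", width, width, comm); /* precision also truncates       */
        return 0;
}
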
map->dso->short_name : map->dso->long_name; - return repsep_snprintf(bf, size, "%-*s", width, dso_name); + return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); } - return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); + return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); } static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, @@ -257,7 +259,10 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, width - ret, ""); } - return ret; + if (ret > width) + bf[width] = '\0'; + + return width; } static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, @@ -302,10 +307,9 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) } static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, - size_t size, - unsigned int width __maybe_unused) + size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%s", he->srcline); + return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline); } struct sort_entry sort_srcline = { @@ -332,7 +336,7 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*s", width, + return repsep_snprintf(bf, size, "%-*.*s", width, width, he->parent ? he->parent->name : "[other]"); } @@ -354,7 +358,7 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%*d", width, he->cpu); + return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); } struct sort_entry sort_cpu = { @@ -484,7 +488,7 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, else if (he->branch_info->flags.mispred) out = "Y"; - return repsep_snprintf(bf, size, "%-*s", width, out); + return repsep_snprintf(bf, size, "%-*.*s", width, width, out); } /* --sort daddr_sym */ @@ -1194,7 +1198,7 @@ bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) return hse_a->se == hse_b->se; } -void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) +void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) { struct hpp_sort_entry *hse; @@ -1202,20 +1206,21 @@ void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) return; hse = container_of(fmt, struct hpp_sort_entry, hpp); - hists__new_col_len(hists, hse->se->se_width_idx, - strlen(hse->se->se_header)); + hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); } static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct perf_evsel *evsel) { struct hpp_sort_entry *hse; - size_t len; + size_t len = fmt->user_len; hse = container_of(fmt, struct hpp_sort_entry, hpp); - len = hists__col_len(&evsel->hists, hse->se->se_width_idx); - return scnprintf(hpp->buf, hpp->size, "%-*s", len, hse->se->se_header); + if (!len) + len = hists__col_len(&evsel->hists, hse->se->se_width_idx); + + return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); } static int __sort__hpp_width(struct perf_hpp_fmt *fmt, @@ -1223,20 +1228,26 @@ static int __sort__hpp_width(struct perf_hpp_fmt *fmt, struct perf_evsel *evsel) { struct hpp_sort_entry *hse; + size_t len = fmt->user_len; hse = container_of(fmt, struct hpp_sort_entry, hpp); - return hists__col_len(&evsel->hists, hse->se->se_width_idx); + if (!len) +
len = hists__col_len(&evsel->hists, hse->se->se_width_idx); + + return len; } static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) { struct hpp_sort_entry *hse; - size_t len; + size_t len = fmt->user_len; hse = container_of(fmt, struct hpp_sort_entry, hpp); - len = hists__col_len(he->hists, hse->se->se_width_idx); + + if (!len) + len = hists__col_len(he->hists, hse->se->se_width_idx); return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); } @@ -1253,6 +1264,7 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd) } hse->se = sd->entry; + hse->hpp.name = sd->entry->se_header; hse->hpp.header = __sort__hpp_header; hse->hpp.width = __sort__hpp_width; hse->hpp.entry = __sort__hpp_entry; @@ -1265,6 +1277,8 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd) INIT_LIST_HEAD(&hse->hpp.list); INIT_LIST_HEAD(&hse->hpp.sort_list); hse->hpp.elide = false; + hse->hpp.len = 0; + hse->hpp.user_len = 0; return hse; } @@ -1439,7 +1453,7 @@ static int __setup_sorting(void) int ret = 0; if (sort_keys == NULL) { - if (field_order) { + if (is_strict_order(field_order)) { /* * If user specified field order but no sort order, * we'll honor it and not add default sort orders. @@ -1625,23 +1639,36 @@ static void reset_dimensions(void) memory_sort_dimensions[i].taken = 0; } +bool is_strict_order(const char *order) +{ + return order && (*order != '+'); +} + static int __setup_output_field(void) { - char *tmp, *tok, *str; - int ret = 0; + char *tmp, *tok, *str, *strp; + int ret = -EINVAL; if (field_order == NULL) return 0; reset_dimensions(); - str = strdup(field_order); + strp = str = strdup(field_order); if (str == NULL) { error("Not enough memory to setup output fields"); return -ENOMEM; } - for (tok = strtok_r(str, ", ", &tmp); + if (!is_strict_order(field_order)) + strp++; + + if (!strlen(strp)) { + error("Invalid --fields key: `+'"); + goto out; + } + + for (tok = strtok_r(strp, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { ret = output_field_add(tok); if (ret == -EINVAL) { @@ -1653,6 +1680,7 @@ static int __setup_output_field(void) } } +out: free(str); return ret; } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 041f0c9cea2b..c03e4ff8beff 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -218,4 +218,5 @@ void perf_hpp__set_elide(int idx, bool elide); int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); +bool is_strict_order(const char *order); #endif /* __PERF_SORT_H */ diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index d75349979e65..9fb5e9e9f161 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -736,7 +736,7 @@ int dso__load_sym(struct dso *dso, struct map *map, if (symstrs == NULL) goto out_elf_end; - sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); + sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx); if (sec_strndx == NULL) goto out_elf_end; @@ -939,8 +939,11 @@ new_symbol: * to it... 
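
is_strict_order() gives --fields a small grammar: a leading '+' means append these keys to the default columns rather than replace them, and a bare "+" is rejected. A runnable sketch of the parse step, simplified from __setup_output_field():

#include <stdbool.h>
#include <stdio.h>

static bool is_strict_order(const char *order)
{
        return order && (*order != '+');
}

int main(void)
{
        const char *spec = "+srcline";
        const char *keys = is_strict_order(spec) ? spec : spec + 1;

        if (!*keys) {
                fprintf(stderr, "Invalid --fields key: `+'\n");
                return 1;
        }
        printf("%s\n", is_strict_order(spec)
               ? "strict: the listed keys replace the defaults"
               : "append: keep the defaults, then add the listed keys");
        printf("keys to parse: %s\n", keys);
        return 0;
}
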
*/ if (symbol_conf.demangle) { - demangled = bfd_demangle(NULL, elf_name, - DMGL_PARAMS | DMGL_ANSI); + int demangle_flags = DMGL_NO_OPTS; + if (verbose) + demangle_flags = DMGL_PARAMS | DMGL_ANSI; + + demangled = bfd_demangle(NULL, elf_name, demangle_flags); if (demangled != NULL) elf_name = demangled; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index eb06746b06b2..ac098a3c2a31 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -15,6 +15,7 @@ #include "machine.h" #include "symbol.h" #include "strlist.h" +#include "header.h" #include <elf.h> #include <limits.h> @@ -523,10 +524,15 @@ struct process_kallsyms_args { struct dso *dso; }; +/* + * These are symbols in the kernel image, so make sure that + * sym is from a kernel DSO. + */ bool symbol__is_idle(struct symbol *sym) { const char * const idle_symbols[] = { "cpu_idle", + "cpu_startup_entry", "intel_idle", "default_idle", "native_safe_halt", @@ -1468,8 +1474,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, if (vmlinux[0] == '/') snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); else - snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", - symbol_conf.symfs, vmlinux); + symbol__join_symfs(symfs_vmlinux, vmlinux); if (dso->kernel == DSO_TYPE_GUEST_KERNEL) symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; @@ -1745,10 +1750,11 @@ static void vmlinux_path__exit(void) zfree(&vmlinux_path); } -static int vmlinux_path__init(void) +static int vmlinux_path__init(struct perf_session_env *env) { struct utsname uts; char bf[PATH_MAX]; + char *kernel_version; vmlinux_path = malloc(sizeof(char *) * 5); if (vmlinux_path == NULL) @@ -1763,25 +1769,31 @@ static int vmlinux_path__init(void) goto out_fail; ++vmlinux_path__nr_entries; - /* only try running kernel version if no symfs was given */ + /* only try kernel version if no symfs was given */ if (symbol_conf.symfs[0] != 0) return 0; - if (uname(&uts) < 0) - return -1; + if (env) { + kernel_version = env->os_release; + } else { + if (uname(&uts) < 0) + goto out_fail; + + kernel_version = uts.release; + } - snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); + snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version); vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); if (vmlinux_path[vmlinux_path__nr_entries] == NULL) goto out_fail; ++vmlinux_path__nr_entries; - snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release); + snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version); vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); if (vmlinux_path[vmlinux_path__nr_entries] == NULL) goto out_fail; ++vmlinux_path__nr_entries; snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux", - uts.release); + kernel_version); vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); if (vmlinux_path[vmlinux_path__nr_entries] == NULL) goto out_fail; @@ -1827,7 +1839,7 @@ static bool symbol__read_kptr_restrict(void) return value; } -int symbol__init(void) +int symbol__init(struct perf_session_env *env) { const char *symfs; @@ -1842,7 +1854,7 @@ int symbol__init(void) symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - sizeof(struct symbol)); - if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0) + if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0) return -1; if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index e7295e93cff9..3f95ea0357e3 100644 --- a/tools/perf/util/symbol.h +++ 
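
Passing perf_session_env into symbol__init() and vmlinux_path__init() above makes the vmlinux candidate paths come from the kernel version recorded in the perf.data header (env->os_release) instead of the running kernel, so reports on another machine or kernel resolve symbols correctly; uname() remains the fallback for live sessions. The selection logic, reduced (version string and buffer sizes are illustrative):

#include <stdio.h>
#include <sys/utsname.h>

/* Prefer the version recorded in the data file; uname() is the live fallback. */
static int kernel_version(const char *recorded, char *out, size_t len)
{
        struct utsname uts;

        if (recorded && *recorded) {            /* e.g. env->os_release */
                snprintf(out, len, "%s", recorded);
                return 0;
        }
        if (uname(&uts) < 0)
                return -1;
        snprintf(out, len, "%s", uts.release);
        return 0;
}

int main(void)
{
        char ver[128];

        if (kernel_version("3.17.0-rc3", ver, sizeof(ver)))
                return 1;
        printf("/lib/modules/%s/build/vmlinux\n", ver);
        return 0;
}
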
b/tools/perf/util/symbol.h @@ -13,6 +13,7 @@ #include <libgen.h> #include "build-id.h" #include "event.h" +#include "util.h" #ifdef HAVE_LIBELF_SUPPORT #include <libelf.h> @@ -59,6 +60,7 @@ extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, #endif #ifndef DMGL_PARAMS +#define DMGL_NO_OPTS 0 /* For readability... */ #define DMGL_PARAMS (1 << 0) /* Include function args */ #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ #endif @@ -143,6 +145,14 @@ struct symbol_conf { }; extern struct symbol_conf symbol_conf; + +static inline int __symbol__join_symfs(char *bf, size_t size, const char *path) +{ + return path__join(bf, size, symbol_conf.symfs, path); +} + +#define symbol__join_symfs(bf, path) __symbol__join_symfs(bf, sizeof(bf), path) + extern int vmlinux_path__nr_entries; extern char **vmlinux_path; @@ -253,7 +263,8 @@ int modules__parse(const char *filename, void *arg, int filename__read_debuglink(const char *filename, char *debuglink, size_t size); -int symbol__init(void); +struct perf_session_env; +int symbol__init(struct perf_session_env *env); void symbol__exit(void); void symbol__elf_init(void); struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 12c7a253a63c..a9df7f2c6dc9 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -42,7 +42,7 @@ struct thread *thread__new(pid_t pid, pid_t tid) goto err_thread; snprintf(comm_str, 32, ":%d", tid); - comm = comm__new(comm_str, 0); + comm = comm__new(comm_str, 0, false); free(comm_str); if (!comm) goto err_thread; @@ -81,19 +81,33 @@ struct comm *thread__comm(const struct thread *thread) return list_first_entry(&thread->comm_list, struct comm, list); } +struct comm *thread__exec_comm(const struct thread *thread) +{ + struct comm *comm, *last = NULL; + + list_for_each_entry(comm, &thread->comm_list, list) { + if (comm->exec) + return comm; + last = comm; + } + + return last; +} + /* CHECKME: time should always be 0 if event aren't ordered */ -int thread__set_comm(struct thread *thread, const char *str, u64 timestamp) +int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp, + bool exec) { struct comm *new, *curr = thread__comm(thread); int err; /* Override latest entry if it had no specific time coverage */ - if (!curr->start) { - err = comm__override(curr, str, timestamp); + if (!curr->start && !curr->exec) { + err = comm__override(curr, str, timestamp, exec); if (err) return err; } else { - new = comm__new(str, timestamp); + new = comm__new(str, timestamp, exec); if (!new) return -ENOMEM; list_add(&new->list, &thread->comm_list); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 716b7723cce2..8c75fa774706 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -38,9 +38,17 @@ static inline void thread__exited(struct thread *thread) thread->dead = true; } -int thread__set_comm(struct thread *thread, const char *comm, u64 timestamp); +int __thread__set_comm(struct thread *thread, const char *comm, u64 timestamp, + bool exec); +static inline int thread__set_comm(struct thread *thread, const char *comm, + u64 timestamp) +{ + return __thread__set_comm(thread, comm, timestamp, false); +} + int thread__comm_len(struct thread *thread); struct comm *thread__comm(const struct thread *thread); +struct comm *thread__exec_comm(const struct thread *thread); const char *thread__comm_str(const struct thread *thread); void thread__insert_map(struct thread *thread, 
struct map *map); int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp); diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h index 4385816d3d49..f11636966a0f 100644 --- a/tools/perf/util/tool.h +++ b/tools/perf/util/tool.h @@ -40,7 +40,7 @@ struct perf_tool { event_op2 tracing_data; event_op2 finished_round, build_id; - bool ordered_samples; + bool ordered_events; bool ordering_requires_timestamps; }; diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 57aaccc1692e..5c9bdd1591a9 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -30,6 +30,11 @@ struct scripting_context *scripting_context; +static int flush_script_unsupported(void) +{ + return 0; +} + static int stop_script_unsupported(void) { return 0; @@ -74,6 +79,7 @@ static int python_generate_script_unsupported(struct pevent *pevent struct scripting_ops python_scripting_unsupported_ops = { .name = "Python", .start_script = python_start_script_unsupported, + .flush_script = flush_script_unsupported, .stop_script = stop_script_unsupported, .process_event = process_event_unsupported, .generate_script = python_generate_script_unsupported, @@ -137,6 +143,7 @@ static int perl_generate_script_unsupported(struct pevent *pevent struct scripting_ops perl_scripting_unsupported_ops = { .name = "Perl", .start_script = perl_start_script_unsupported, + .flush_script = flush_script_unsupported, .stop_script = stop_script_unsupported, .process_event = process_event_unsupported, .generate_script = perl_generate_script_unsupported, diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 7b6d68688327..52aaa19e1eb1 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -64,6 +64,7 @@ struct perf_session; struct scripting_ops { const char *name; int (*start_script) (const char *script, int argc, const char **argv); + int (*flush_script) (void); int (*stop_script) (void); void (*process_event) (union perf_event *event, struct perf_sample *sample, diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index e52e7461911b..24e8d871b74e 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -13,6 +13,7 @@ #include <limits.h> #include <byteswap.h> #include <linux/kernel.h> +#include <unistd.h> /* * XXX We need to find a better place for these things... 
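
thread__exec_comm() above returns the name installed by the most recent exec(), falling back to the oldest recorded comm when no exec was seen; the thread's comm list is kept newest-first, which makes both cases one forward scan. The same selection over a plain array, as a sketch:

#include <stdbool.h>
#include <stddef.h>

struct comm_rec {
        const char *str;
        bool exec;      /* set when the name came from exec() */
};

/*
 * comms[] is newest-first, like the thread's comm list. Return the
 * most recent exec comm, or the oldest entry when exec never happened.
 */
static const struct comm_rec *exec_comm(const struct comm_rec *comms, size_t n)
{
        const struct comm_rec *last = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                if (comms[i].exec)
                        return &comms[i];
                last = &comms[i];
        }
        return last;
}
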
@@ -282,6 +283,18 @@ void get_term_dimensions(struct winsize *ws) ws->ws_col = 80; } +void set_term_quiet_input(struct termios *old) +{ + struct termios tc; + + tcgetattr(0, old); + tc = *old; + tc.c_lflag &= ~(ICANON | ECHO); + tc.c_cc[VMIN] = 0; + tc.c_cc[VTIME] = 0; + tcsetattr(0, TCSANOW, &tc); +} + static void set_tracing_events_path(const char *mountpoint) { snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", @@ -443,6 +456,7 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep) size_t size = 0, alloc_size = 0; void *bf = NULL, *nbf; int fd, n, err = 0; + char sbuf[STRERR_BUFSIZE]; fd = open(filename, O_RDONLY); if (fd < 0) @@ -463,8 +477,8 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep) n = read(fd, bf + size, alloc_size - size); if (n < 0) { if (size) { - pr_warning("read failed %d: %s\n", - errno, strerror(errno)); + pr_warning("read failed %d: %s\n", errno, + strerror_r(errno, sbuf, sizeof(sbuf))); err = 0; } else err = -errno; @@ -536,3 +550,39 @@ void mem_bswap_64(void *src, int byte_size) ++m; } } + +bool find_process(const char *name) +{ + size_t len = strlen(name); + DIR *dir; + struct dirent *d; + int ret = -1; + + dir = opendir(procfs__mountpoint()); + if (!dir) + return false; + + /* Walk through the directory. */ + while (ret && (d = readdir(dir)) != NULL) { + char path[PATH_MAX]; + char *data; + size_t size; + + if ((d->d_type != DT_DIR) || + !strcmp(".", d->d_name) || + !strcmp("..", d->d_name)) + continue; + + scnprintf(path, sizeof(path), "%s/%s/comm", + procfs__mountpoint(), d->d_name); + + if (filename__read_str(path, &data, &size)) + continue; + + ret = strncmp(name, data, len); + free(data); + } + + closedir(dir); + return ret ? false : true; +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 66864364ccb4..d6a79b1fb28c 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -68,12 +68,14 @@ #include <sys/socket.h> #include <sys/ioctl.h> #include <inttypes.h> +#include <linux/kernel.h> #include <linux/magic.h> #include <linux/types.h> #include <sys/ttydefaults.h> #include <api/fs/debugfs.h> #include <termios.h> #include <linux/bitops.h> +#include <termios.h> extern const char *graph_line; extern const char *graph_dotted_line; @@ -307,6 +309,7 @@ extern unsigned int page_size; extern int cacheline_size; void get_term_dimensions(struct winsize *ws); +void set_term_quiet_input(struct termios *old); struct parse_tag { char tag; @@ -317,6 +320,21 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags); #define SRCLINE_UNKNOWN ((char *) "??:0") +static inline int path__join(char *bf, size_t size, + const char *path1, const char *path2) +{ + return scnprintf(bf, size, "%s%s%s", path1, path1[0] ? "/" : "", path2); +} + +static inline int path__join3(char *bf, size_t size, + const char *path1, const char *path2, + const char *path3) +{ + return scnprintf(bf, size, "%s%s%s%s%s", + path1, path1[0] ? "/" : "", + path2, path2[0] ? "/" : "", path3); +} + struct dso; char *get_srcline(struct dso *dso, unsigned long addr); @@ -330,4 +348,5 @@ void mem_bswap_64(void *src, int byte_size); void mem_bswap_32(void *src, int byte_size); const char *get_filename_for_perf_kvm(void); +bool find_process(const char *name); #endif /* GIT_COMPAT_UTIL_H */
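
path__join() and path__join3() encode one convention: an empty leading component contributes no separator, so prefixing paths with symbol_conf.symfs is a no-op when --symfs was not given. A runnable approximation (plain snprintf here; perf's scnprintf differs only in returning the number of characters actually written):

#include <stdio.h>

/* An empty leading component contributes no separator. */
static int path_join(char *bf, size_t size, const char *p1, const char *p2)
{
        return snprintf(bf, size, "%s%s%s", p1, p1[0] ? "/" : "", p2);
}

int main(void)
{
        char bf[64];

        path_join(bf, sizeof(bf), "", "boot/vmlinux");
        printf("%s\n", bf);             /* boot/vmlinux */
        path_join(bf, sizeof(bf), "/mnt/symfs", "boot/vmlinux");
        printf("%s\n", bf);             /* /mnt/symfs/boot/vmlinux */
        return 0;
}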