author    Linus Torvalds <torvalds@linux-foundation.org>   2016-03-19 15:38:41 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>   2016-03-19 15:38:41 -0700
commit    d5e2d00898bdfed9586472679760fc81a2ca2d02
tree      55ac781983bf7144230ad8a5a995ce02b6ac39a1 /arch/powerpc/perf
parent    31e182363b39d84031eadf0caf6d99fd9eb056f0
parent    6e669f085d595cb6053920832c89f1a13067db44
Merge tag 'powerpc-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "This was delayed a day or two by some build-breakage on old toolchains
  which we've now fixed.

  There's two PCI commits both acked by Bjorn.

  There's one commit to mm/hugepage.c which is (co)authored by Kirill.

  Highlights:
   - Restructure Linux PTE on Book3S/64 to Radix format from Paul Mackerras
   - Book3s 64 MMU cleanup in preparation for Radix MMU from Aneesh Kumar K.V
   - Add POWER9 cputable entry from Michael Neuling
   - FPU/Altivec/VSX save/restore optimisations from Cyril Bur
   - Add support for new ftrace ABI on ppc64le from Torsten Duwe

  Various cleanups & minor fixes from:
   - Adam Buchbinder, Andrew Donnellan, Balbir Singh, Christophe Leroy,
     Cyril Bur, Luis Henriques, Madhavan Srinivasan, Pan Xinhui, Russell
     Currey, Sukadev Bhattiprolu, Suraj Jitindar Singh.

  General:
   - atomics: Allow architectures to define their own __atomic_op_* helpers
     from Boqun Feng
   - Implement atomic{, 64}_*_return_* variants and acquire/release/relaxed
     variants for (cmp)xchg from Boqun Feng
   - Add powernv_defconfig from Jeremy Kerr
   - Fix BUG_ON() reporting in real mode from Balbir Singh
   - Add xmon command to dump OPAL msglog from Andrew Donnellan
   - Add xmon command to dump process/task similar to ps(1) from Douglas Miller
   - Clean up memory hotplug failure paths from David Gibson

  pci/eeh:
   - Redesign SR-IOV on PowerNV to give absolute isolation between VFs from
     Wei Yang.
   - EEH Support for SRIOV VFs from Wei Yang and Gavin Shan.
   - PCI/IOV: Rename and export virtfn_{add, remove} from Wei Yang
   - PCI: Add pcibios_bus_add_device() weak function from Wei Yang
   - MAINTAINERS: Update EEH details and maintainership from Russell Currey

  cxl:
   - Support added to the CXL driver for running on both bare-metal and
     hypervisor systems, from Christophe Lombard and Frederic Barrat.
   - Ignore probes for virtual afu pci devices from Vaibhav Jain

  perf:
   - Export Power8 generic and cache events to sysfs from Sukadev Bhattiprolu
   - hv-24x7: Fix usage with chip events, display change in counter values,
     display domain indices in sysfs, eliminate domain suffix in event names,
     from Sukadev Bhattiprolu

  Freescale:
   - Updates from Scott: "Highlights include 8xx optimizations, 32-bit
     checksum optimizations, 86xx consolidation, e5500/e6500 cpu hotplug,
     more fman and other dt bits, and minor fixes/cleanup"

* tag 'powerpc-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (179 commits)
  powerpc: Fix unrecoverable SLB miss during restore_math()
  powerpc/8xx: Fix do_mtspr_cpu6() build on older compilers
  powerpc/rcpm: Fix build break when SMP=n
  powerpc/book3e-64: Use hardcoded mttmr opcode
  powerpc/fsl/dts: Add "jedec,spi-nor" flash compatible
  powerpc/T104xRDB: add tdm riser card node to device tree
  powerpc32: PAGE_EXEC required for inittext
  powerpc/mpc85xx: Add pcsphy nodes to FManV3 device tree
  powerpc/mpc85xx: Add MDIO bus muxing support to the board device tree(s)
  powerpc/86xx: Introduce and use common dtsi
  powerpc/86xx: Update device tree
  powerpc/86xx: Move dts files to fsl directory
  powerpc/86xx: Switch to kconfig fragments approach
  powerpc/86xx: Update defconfigs
  powerpc/86xx: Consolidate common platform code
  powerpc32: Remove one insn in mulhdu
  powerpc32: small optimisation in flush_icache_range()
  powerpc: Simplify test in __dma_sync()
  powerpc32: move xxxxx_dcache_range() functions inline
  powerpc32: Remove clear_pages() and define clear_page() inline
  ...
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--  arch/powerpc/perf/core-book3s.c           2
-rw-r--r--  arch/powerpc/perf/hv-24x7.c             225
-rw-r--r--  arch/powerpc/perf/hv-24x7.h               3
-rw-r--r--  arch/powerpc/perf/hv-gpci.c              43
-rw-r--r--  arch/powerpc/perf/power7-pmu.c           18
-rw-r--r--  arch/powerpc/perf/power8-events-list.h   51
-rw-r--r--  arch/powerpc/perf/power8-pmu.c          112
7 files changed, 298 insertions, 156 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index d1e65ce545b3..97a1d40d8696 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -651,7 +651,7 @@ static void pmao_restore_workaround(bool ebb)
/*
* We are already soft-disabled in power_pmu_enable(). We need to hard
- * enable to actually prevent the PMU exception from firing.
+ * disable to actually prevent the PMU exception from firing.
*/
hard_irq_disable();
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 3b09ecfd0aee..2da41b78cb6d 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -27,20 +27,6 @@
#include "hv-24x7-catalog.h"
#include "hv-common.h"
-static const char *event_domain_suffix(unsigned domain)
-{
- switch (domain) {
-#define DOMAIN(n, v, x, c) \
- case HV_PERF_DOMAIN_##n: \
- return "__" #n;
-#include "hv-24x7-domains.h"
-#undef DOMAIN
- default:
- WARN(1, "unknown domain %d\n", domain);
- return "__UNKNOWN_DOMAIN_SUFFIX";
- }
-}
-
static bool domain_is_valid(unsigned domain)
{
switch (domain) {
@@ -68,6 +54,24 @@ static bool is_physical_domain(unsigned domain)
}
}
+static const char *domain_name(unsigned domain)
+{
+ if (!domain_is_valid(domain))
+ return NULL;
+
+ switch (domain) {
+ case HV_PERF_DOMAIN_PHYS_CHIP: return "Physical Chip";
+ case HV_PERF_DOMAIN_PHYS_CORE: return "Physical Core";
+ case HV_PERF_DOMAIN_VCPU_HOME_CORE: return "VCPU Home Core";
+ case HV_PERF_DOMAIN_VCPU_HOME_CHIP: return "VCPU Home Chip";
+ case HV_PERF_DOMAIN_VCPU_HOME_NODE: return "VCPU Home Node";
+ case HV_PERF_DOMAIN_VCPU_REMOTE_NODE: return "VCPU Remote Node";
+ }
+
+ WARN_ON_ONCE(domain);
+ return NULL;
+}
+
static bool catalog_entry_domain_is_valid(unsigned domain)
{
return is_physical_domain(domain);
@@ -101,6 +105,7 @@ static bool catalog_entry_domain_is_valid(unsigned domain)
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
+EVENT_DEFINE_RANGE_FORMAT(chip, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
@@ -115,6 +120,7 @@ static struct attribute *format_attrs[] = {
&format_attr_domain.attr,
&format_attr_offset.attr,
&format_attr_core.attr,
+ &format_attr_chip.attr,
&format_attr_vcpu.attr,
&format_attr_lpar.attr,
NULL,
@@ -274,32 +280,70 @@ static unsigned long h_get_24x7_catalog_page(char page[],
version, index);
}
-static unsigned core_domains[] = {
- HV_PERF_DOMAIN_PHYS_CORE,
- HV_PERF_DOMAIN_VCPU_HOME_CORE,
- HV_PERF_DOMAIN_VCPU_HOME_CHIP,
- HV_PERF_DOMAIN_VCPU_HOME_NODE,
- HV_PERF_DOMAIN_VCPU_REMOTE_NODE,
-};
-/* chip event data always yeilds a single event, core yeilds multiple */
-#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains)
-
+/*
+ * Each event we find in the catalog, will have a sysfs entry. Format the
+ * data for this sysfs entry based on the event's domain.
+ *
+ * Events belonging to the Chip domain can only be monitored in that domain.
+ * i.e the domain for these events is a fixed/known value.
+ *
+ * Events belonging to the Core domain can be monitored either in the physical
+ * core or in one of the virtual CPU domains. So the domain value for these
+ * events must be specified by the user (i.e is a required parameter). Format
+ * the Core events with 'domain=?' so the perf-tool can error check required
+ * parameters.
+ *
+ * NOTE: For the Core domain events, rather than making domain a required
+ * parameter we could default it to PHYS_CORE and allow users to
+ * override the domain to one of the VCPU domains.
+ *
+ * However, this can make the interface a little inconsistent.
+ *
+ * If we set domain=2 (PHYS_CHIP) and allow user to override this field
+ * the user may be tempted to also modify the "offset=x" field, which
+ * can lead to confusing usage. Consider the HPM_PCYC (offset=0x18) and
+ * HPM_INST (offset=0x20) events. With:
+ *
+ * perf stat -e hv_24x7/HPM_PCYC,offset=0x20/
+ *
+ * we end up monitoring HPM_INST, while the command line has HPM_PCYC.
+ *
+ * By not assigning a default value to the domain for the Core events,
+ * we can have simple guidelines:
+ *
+ * - Specifying values for parameters with "=?" is required.
+ *
+ * - Specifying (i.e overriding) values for other parameters
+ * is undefined.
+ */
static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
{
const char *sindex;
const char *lpar;
+ const char *domain_str;
+ char buf[8];
- if (is_physical_domain(domain)) {
+ switch (domain) {
+ case HV_PERF_DOMAIN_PHYS_CHIP:
+ snprintf(buf, sizeof(buf), "%d", domain);
+ domain_str = buf;
+ lpar = "0x0";
+ sindex = "chip";
+ break;
+ case HV_PERF_DOMAIN_PHYS_CORE:
+ domain_str = "?";
lpar = "0x0";
sindex = "core";
- } else {
+ break;
+ default:
+ domain_str = "?";
lpar = "?";
sindex = "vcpu";
}
return kasprintf(GFP_KERNEL,
- "domain=0x%x,offset=0x%x,%s=?,lpar=%s",
- domain,
+ "domain=%s,offset=0x%x,%s=?,lpar=%s",
+ domain_str,
be16_to_cpu(event->event_counter_offs) +
be16_to_cpu(event->event_group_record_offs),
sindex,
@@ -339,6 +383,15 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
return &attr->attr.attr;
}
+/*
+ * Allocate and initialize strings representing event attributes.
+ *
+ * NOTE: The strings allocated here are never destroyed and continue to
+ * exist till shutdown. This is to allow us to create as many events
+ * from the catalog as possible, even if we encounter errors with some.
+ * In case of changes to error paths in future, these may need to be
+ * freed by the caller.
+ */
static struct attribute *device_str_attr_create(char *name, int name_max,
int name_nonce,
char *str, size_t str_max)
@@ -370,16 +423,6 @@ out_s:
return NULL;
}
-static void device_str_attr_destroy(struct attribute *attr)
-{
- struct dev_ext_attribute *d;
-
- d = container_of(attr, struct dev_ext_attribute, attr.attr);
- kfree(d->var);
- kfree(d->attr.attr.name);
- kfree(d);
-}
-
static struct attribute *event_to_attr(unsigned ix,
struct hv_24x7_event_data *event,
unsigned domain,
@@ -387,7 +430,6 @@ static struct attribute *event_to_attr(unsigned ix,
{
int event_name_len;
char *ev_name, *a_ev_name, *val;
- const char *ev_suffix;
struct attribute *attr;
if (!domain_is_valid(domain)) {
@@ -400,14 +442,13 @@ static struct attribute *event_to_attr(unsigned ix,
if (!val)
return NULL;
- ev_suffix = event_domain_suffix(domain);
ev_name = event_name(event, &event_name_len);
if (!nonce)
- a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s",
- (int)event_name_len, ev_name, ev_suffix);
+ a_ev_name = kasprintf(GFP_KERNEL, "%.*s",
+ (int)event_name_len, ev_name);
else
- a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
- (int)event_name_len, ev_name, ev_suffix, nonce);
+ a_ev_name = kasprintf(GFP_KERNEL, "%.*s__%d",
+ (int)event_name_len, ev_name, nonce);
if (!a_ev_name)
goto out_val;
@@ -452,45 +493,14 @@ event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
return device_str_attr_create(name, nl, nonce, desc, dl);
}
-static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
+static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
struct hv_24x7_event_data *event, int nonce)
{
- unsigned i;
-
- switch (event->domain) {
- case HV_PERF_DOMAIN_PHYS_CHIP:
- *attrs = event_to_attr(ix, event, event->domain, nonce);
- return 1;
- case HV_PERF_DOMAIN_PHYS_CORE:
- for (i = 0; i < ARRAY_SIZE(core_domains); i++) {
- attrs[i] = event_to_attr(ix, event, core_domains[i],
- nonce);
- if (!attrs[i]) {
- pr_warn("catalog event %u: individual attr %u "
- "creation failure\n", ix, i);
- for (; i; i--)
- device_str_attr_destroy(attrs[i - 1]);
- return -1;
- }
- }
- return i;
- default:
- pr_warn("catalog event %u: domain %u is not allowed in the "
- "catalog\n", ix, event->domain);
+ *attrs = event_to_attr(ix, event, event->domain, nonce);
+ if (!*attrs)
return -1;
- }
-}
-static size_t event_to_attr_ct(struct hv_24x7_event_data *event)
-{
- switch (event->domain) {
- case HV_PERF_DOMAIN_PHYS_CHIP:
- return 1;
- case HV_PERF_DOMAIN_PHYS_CORE:
- return ARRAY_SIZE(core_domains);
- default:
- return 0;
- }
+ return 0;
}
/* */
@@ -718,9 +728,8 @@ static int create_events_from_catalog(struct attribute ***events_,
goto e_free;
}
- if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
- pr_err("event_entry_count %zu is invalid\n",
- event_entry_count);
+ if (SIZE_MAX - 1 < event_entry_count) {
+ pr_err("event_entry_count %zu is invalid\n", event_entry_count);
ret = -EIO;
goto e_free;
}
@@ -793,7 +802,7 @@ static int create_events_from_catalog(struct attribute ***events_,
continue;
}
- attr_max += event_to_attr_ct(event);
+ attr_max++;
}
event_idx_last = event_idx;
@@ -843,12 +852,12 @@ static int create_events_from_catalog(struct attribute ***events_,
nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
ct = event_data_to_attrs(event_idx, events + event_attr_ct,
event, nonce);
- if (ct <= 0) {
+ if (ct < 0) {
pr_warn("event %zu (%.*s) creation failure, skipping\n",
event_idx, nl, name);
junk_events++;
} else {
- event_attr_ct += ct;
+ event_attr_ct++;
event_descs[desc_ct] = event_to_desc_attr(event, nonce);
if (event_descs[desc_ct])
desc_ct++;
@@ -953,6 +962,27 @@ e_free:
return ret;
}
+static ssize_t domains_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ int d, n, count = 0;
+ const char *str;
+
+ for (d = 0; d < HV_PERF_DOMAIN_MAX; d++) {
+ str = domain_name(d);
+ if (!str)
+ continue;
+
+ n = sprintf(page, "%d: %s\n", d, str);
+ if (n < 0)
+ break;
+
+ count += n;
+ page += n;
+ }
+ return count;
+}
+
#define PAGE_0_ATTR(_name, _fmt, _expr) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *dev_attr, \
@@ -981,6 +1011,7 @@ PAGE_0_ATTR(catalog_version, "%lld\n",
PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
+static DEVICE_ATTR_RO(domains);
static struct bin_attribute *if_bin_attrs[] = {
&bin_attr_catalog,
@@ -990,6 +1021,7 @@ static struct bin_attribute *if_bin_attrs[] = {
static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
+ &dev_attr_domains.attr,
NULL,
};
@@ -1081,10 +1113,16 @@ static int add_event_to_24x7_request(struct perf_event *event,
return -EINVAL;
}
- if (is_physical_domain(event_get_domain(event)))
+ switch (event_get_domain(event)) {
+ case HV_PERF_DOMAIN_PHYS_CHIP:
+ idx = event_get_chip(event);
+ break;
+ case HV_PERF_DOMAIN_PHYS_CORE:
idx = event_get_core(event);
- else
+ break;
+ default:
idx = event_get_vcpu(event);
+ }
i = request_buffer->num_requests++;
req = &request_buffer->requests[i];
@@ -1200,11 +1238,12 @@ static int h_24x7_event_init(struct perf_event *event)
return -EACCES;
}
- /* see if the event complains */
+ /* Get the initial value of the counter for this event */
if (single_24x7_request(event, &ct)) {
pr_devel("test hcall failed\n");
return -EIO;
}
+ (void)local64_xchg(&event->hw.prev_count, ct);
return 0;
}
@@ -1267,6 +1306,16 @@ static void h_24x7_event_read(struct perf_event *event)
h24x7hw = &get_cpu_var(hv_24x7_hw);
h24x7hw->events[i] = event;
put_cpu_var(h24x7hw);
+ /*
+ * Clear the event count so we can compute the _change_
+ * in the 24x7 raw counter value at the end of the txn.
+ *
+ * Note that we could alternatively read the 24x7 value
+ * now and save its value in event->hw.prev_count. But
+ * that would require issuing a hcall, which would then
+ * defeat the purpose of using the txn interface.
+ */
+ local64_set(&event->count, 0);
}
put_cpu_var(hv_24x7_reqb);
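The chip field added to the format attributes above shares config bits 16-31
with the existing core and vcpu fields, while domain occupies bits 0-3. As a
rough, self-contained illustration of that bit-range encoding (this is not the
kernel's EVENT_DEFINE_RANGE_FORMAT expansion, just a hypothetical sketch of
the same idea):

#include <stdint.h>
#include <stdio.h>

/* Extract bits [lo, hi] of a 64-bit perf config word. */
static uint64_t config_get_range(uint64_t config, int lo, int hi)
{
	int width = hi - lo + 1;
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (config >> lo) & mask;
}

int main(void)
{
	/* domain in bits 0-3, chip/core/vcpu index in bits 16-31 */
	uint64_t config = 0x1ULL | (2ULL << 16);

	printf("domain=%llu index=%llu\n",
	       (unsigned long long)config_get_range(config, 0, 3),
	       (unsigned long long)config_get_range(config, 16, 31));
	return 0;
}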
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 0f9fa21a29f2..791455e7f5cf 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -7,6 +7,7 @@ enum hv_perf_domains {
#define DOMAIN(n, v, x, c) HV_PERF_DOMAIN_##n = v,
#include "hv-24x7-domains.h"
#undef DOMAIN
+ HV_PERF_DOMAIN_MAX,
};
struct hv_24x7_request {
@@ -80,7 +81,7 @@ struct hv_24x7_result {
__u8 results_complete;
__be16 num_elements_returned;
- /* This is a copy of @data_size from the coresponding hv_24x7_request */
+ /* This is a copy of @data_size from the corresponding hv_24x7_request */
__be16 result_element_data_size;
__u8 reserved[0x2];
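HV_PERF_DOMAIN_MAX is added purely as an iteration bound: nothing is ever
assigned that value, and the new domains_show() above loops d from 0 up to
HV_PERF_DOMAIN_MAX, skipping values with no name. The resulting attribute can
be read from user space like any other sysfs file; a minimal reader is
sketched below, with the path an assumption based on the hv_24x7 PMU name and
its "interface" attribute group:

#include <stdio.h>

int main(void)
{
	/* Assumed location of the new attribute; adjust if the PMU or
	 * group is named differently on a given system. */
	const char *path =
		"/sys/bus/event_source/devices/hv_24x7/interface/domains";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "1: Physical Chip" */
	fclose(f);
	return 0;
}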
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 856fe6e03c2a..7aa37236bb70 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -127,8 +127,16 @@ static const struct attribute_group *attr_groups[] = {
NULL,
};
-#define GPCI_MAX_DATA_BYTES \
- (1024 - sizeof(struct hv_get_perf_counter_info_params))
+#define HGPCI_REQ_BUFFER_SIZE 4096
+#define HGPCI_MAX_DATA_BYTES \
+ (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))
+
+DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
+
+struct hv_gpci_request_buffer {
+ struct hv_get_perf_counter_info_params params;
+ uint8_t bytes[HGPCI_MAX_DATA_BYTES];
+} __packed;
static unsigned long single_gpci_request(u32 req, u32 starting_index,
u16 secondary_index, u8 version_in, u32 offset, u8 length,
@@ -137,24 +145,21 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
unsigned long ret;
size_t i;
u64 count;
+ struct hv_gpci_request_buffer *arg;
+
+ arg = (void *)get_cpu_var(hv_gpci_reqb);
+ memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);
- struct {
- struct hv_get_perf_counter_info_params params;
- uint8_t bytes[GPCI_MAX_DATA_BYTES];
- } __packed __aligned(sizeof(uint64_t)) arg = {
- .params = {
- .counter_request = cpu_to_be32(req),
- .starting_index = cpu_to_be32(starting_index),
- .secondary_index = cpu_to_be16(secondary_index),
- .counter_info_version_in = version_in,
- }
- };
+ arg->params.counter_request = cpu_to_be32(req);
+ arg->params.starting_index = cpu_to_be32(starting_index);
+ arg->params.secondary_index = cpu_to_be16(secondary_index);
+ arg->params.counter_info_version_in = version_in;
ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
- virt_to_phys(&arg), sizeof(arg));
+ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
if (ret) {
pr_devel("hcall failed: 0x%lx\n", ret);
- return ret;
+ goto out;
}
/*
@@ -163,9 +168,11 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
*/
count = 0;
for (i = offset; i < offset + length; i++)
- count |= arg.bytes[i] << (i - offset);
+ count |= arg->bytes[i] << (i - offset);
*value = count;
+out:
+ put_cpu_var(hv_gpci_reqb);
return ret;
}
@@ -245,10 +252,10 @@ static int h_gpci_event_init(struct perf_event *event)
}
/* last byte within the buffer? */
- if ((event_get_offset(event) + length) > GPCI_MAX_DATA_BYTES) {
+ if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
pr_devel("request outside of buffer: %zu > %zu\n",
(size_t)event_get_offset(event) + length,
- GPCI_MAX_DATA_BYTES);
+ HGPCI_MAX_DATA_BYTES);
return -EINVAL;
}
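The hv-gpci change above replaces a large on-stack argument block with a
preallocated per-CPU buffer bracketed by get_cpu_var()/put_cpu_var(), so the
hcall argument can grow to a full 4096 bytes without pressuring the kernel
stack. A loose user-space analogue of the same pattern - one preallocated
buffer per execution context instead of a big local variable - is sketched
below; the names are hypothetical and the kernel's per-CPU machinery is, of
course, different:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REQ_BUFFER_SIZE 4096

struct request_params {
	uint32_t counter_request;
	uint32_t starting_index;
	uint16_t secondary_index;
	uint8_t  version_in;
};

struct request_buffer {
	struct request_params params;
	uint8_t bytes[REQ_BUFFER_SIZE - sizeof(struct request_params)];
};

/* One preallocated buffer per thread rather than ~4KB on the stack of
 * every call (the kernel uses a per-CPU buffer for the same reason). */
static _Thread_local struct request_buffer reqb;

static int single_request(uint32_t req, uint32_t starting_index)
{
	struct request_buffer *arg = &reqb;

	memset(arg, 0, sizeof(*arg));
	arg->params.counter_request = req;
	arg->params.starting_index = starting_index;

	/* ... hand 'arg' to the expensive call here ... */
	printf("request %u, index %u, %zu data bytes available\n",
	       req, starting_index, sizeof(arg->bytes));
	return 0;
}

int main(void)
{
	return single_request(0x10, 0);
}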
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 5b62f2389290..a383c23a9070 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -54,7 +54,7 @@
* Power7 event codes.
*/
#define EVENT(_name, _code) \
- PME_##_name = _code,
+ _name = _code,
enum {
#include "power7-events-list.h"
@@ -318,14 +318,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
}
static int power7_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
- [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
- [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
- [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
- [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BR_MPRED,
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/perf/power8-events-list.h b/arch/powerpc/perf/power8-events-list.h
new file mode 100644
index 000000000000..741b77edd03e
--- /dev/null
+++ b/arch/powerpc/perf/power8-events-list.h
@@ -0,0 +1,51 @@
+/*
+ * Performance counter support for POWER8 processors.
+ *
+ * Copyright 2014 Sukadev Bhattiprolu, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Power8 event codes.
+ */
+EVENT(PM_CYC, 0x0001e)
+EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
+EVENT(PM_CMPLU_STALL, 0x4000a)
+EVENT(PM_INST_CMPL, 0x00002)
+EVENT(PM_BRU_FIN, 0x10068)
+EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+
+/* All L1 D cache load references counted at finish, gated by reject */
+EVENT(PM_LD_REF_L1, 0x100ee)
+/* Load Missed L1 */
+EVENT(PM_LD_MISS_L1, 0x3e054)
+/* Store Missed L1 */
+EVENT(PM_ST_MISS_L1, 0x300f0)
+/* L1 cache data prefetches */
+EVENT(PM_L1_PREF, 0x0d8b8)
+/* Instruction fetches from L1 */
+EVENT(PM_INST_FROM_L1, 0x04080)
+/* Demand iCache Miss */
+EVENT(PM_L1_ICACHE_MISS, 0x200fd)
+/* Instruction Demand sectors written into IL1 */
+EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
+/* Instruction prefetch written into IL1 */
+EVENT(PM_IC_PREF_WRITE, 0x0408e)
+/* The data cache was reloaded from local core's L3 due to a demand load */
+EVENT(PM_DATA_FROM_L3, 0x4c042)
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+/* All successful D-side store dispatches for this thread */
+EVENT(PM_L2_ST, 0x17080)
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+EVENT(PM_L2_ST_MISS, 0x17082)
+/* Total HW L3 prefetches(Load+store) */
+EVENT(PM_L3_PREF_ALL, 0x4e052)
+/* Data PTEG reload */
+EVENT(PM_DTLB_MISS, 0x300fc)
+/* ITLB Reloaded */
+EVENT(PM_ITLB_MISS, 0x400fc)
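Moving the event codes into a standalone header means the same list can be
expanded more than once with different definitions of EVENT(): once into an
enum (as power8-pmu.c now does below) and again wherever name/code pairs are
needed, for example when exporting events by name. A small self-contained
sketch of that double expansion, using two of the codes above and an inlined
stand-in for the header:

#include <stdio.h>

/* Two-entry stand-in for power8-events-list.h. */
#define POWER8_EVENT_LIST \
	EVENT(PM_CYC,       0x0001e) \
	EVENT(PM_INST_CMPL, 0x00002)

/* Expansion 1: symbolic constants. */
enum {
#define EVENT(_name, _code) _name = _code,
	POWER8_EVENT_LIST
#undef EVENT
};

/* Expansion 2: a name/code lookup table. */
static const struct {
	const char *name;
	unsigned long code;
} power8_events[] = {
#define EVENT(_name, _code) { #_name, _code },
	POWER8_EVENT_LIST
#undef EVENT
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(power8_events) / sizeof(power8_events[0]); i++)
		printf("%s = 0x%lx\n",
		       power8_events[i].name, power8_events[i].code);
	return 0;
}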
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 9958ba8bf0d2..690d9186a855 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -17,48 +17,16 @@
#include <asm/firmware.h>
#include <asm/cputable.h>
-
/*
* Some power8 event codes.
*/
-#define PM_CYC 0x0001e
-#define PM_GCT_NOSLOT_CYC 0x100f8
-#define PM_CMPLU_STALL 0x4000a
-#define PM_INST_CMPL 0x00002
-#define PM_BRU_FIN 0x10068
-#define PM_BR_MPRED_CMPL 0x400f6
-
-/* All L1 D cache load references counted at finish, gated by reject */
-#define PM_LD_REF_L1 0x100ee
-/* Load Missed L1 */
-#define PM_LD_MISS_L1 0x3e054
-/* Store Missed L1 */
-#define PM_ST_MISS_L1 0x300f0
-/* L1 cache data prefetches */
-#define PM_L1_PREF 0x0d8b8
-/* Instruction fetches from L1 */
-#define PM_INST_FROM_L1 0x04080
-/* Demand iCache Miss */
-#define PM_L1_ICACHE_MISS 0x200fd
-/* Instruction Demand sectors wriittent into IL1 */
-#define PM_L1_DEMAND_WRITE 0x0408c
-/* Instruction prefetch written into IL1 */
-#define PM_IC_PREF_WRITE 0x0408e
-/* The data cache was reloaded from local core's L3 due to a demand load */
-#define PM_DATA_FROM_L3 0x4c042
-/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
-#define PM_DATA_FROM_L3MISS 0x300fe
-/* All successful D-side store dispatches for this thread */
-#define PM_L2_ST 0x17080
-/* All successful D-side store dispatches for this thread that were L2 Miss */
-#define PM_L2_ST_MISS 0x17082
-/* Total HW L3 prefetches(Load+store) */
-#define PM_L3_PREF_ALL 0x4e052
-/* Data PTEG reload */
-#define PM_DTLB_MISS 0x300fc
-/* ITLB Reloaded */
-#define PM_ITLB_MISS 0x400fc
+#define EVENT(_name, _code) _name = _code,
+
+enum {
+#include "power8-events-list.h"
+};
+#undef EVENT
/*
* Raw event encoding for POWER8:
@@ -415,7 +383,7 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
pmc_inuse |= 1 << pmc;
}
- /* In continous sampling mode, update SDAR on TLB miss */
+ /* In continuous sampling mode, update SDAR on TLB miss */
mmcra = MMCRA_SDAR_MODE_TLB;
mmcr1 = mmcr2 = 0;
@@ -604,6 +572,71 @@ static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
+CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
+
+CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
+CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
+
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
+CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
+CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
+CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
+
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
+
+static struct attribute *power8_events_attr[] = {
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(PM_CMPLU_STALL),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_BRU_FIN),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_REF_L1),
+ CACHE_EVENT_PTR(PM_L1_PREF),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_INST_FROM_L1),
+ CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3),
+ CACHE_EVENT_PTR(PM_L3_PREF_ALL),
+ CACHE_EVENT_PTR(PM_L2_ST_MISS),
+ CACHE_EVENT_PTR(PM_L2_ST),
+
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_BRU_FIN),
+
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
+ NULL
+};
+
+static struct attribute_group power8_pmu_events_group = {
+ .name = "events",
+ .attrs = power8_events_attr,
+};
+
PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
@@ -640,6 +673,7 @@ struct attribute_group power8_pmu_format_group = {
static const struct attribute_group *power8_pmu_attr_groups[] = {
&power8_pmu_format_group,
+ &power8_pmu_events_group,
NULL,
};
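With power8_pmu_events_group registered, each named event becomes a file under
the PMU's sysfs events directory carrying its raw encoding, so tools can
resolve symbolic names without a built-in table. Assuming the core PMU
registers under the usual "cpu" name, the user-visible effect would look
roughly like the following (paths and listing are illustrative, not captured
from a real system):

  $ ls /sys/bus/event_source/devices/cpu/events
  branch-instructions  branch-misses  cache-misses  cache-references
  cpu-cycles  instructions  stalled-cycles-backend  stalled-cycles-frontend  ...
  $ perf stat -e cpu/instructions/,cpu/cache-misses/ -- sleep 1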