Diffstat (limited to 'drivers/hwtracing/coresight')
-rw-r--r--  drivers/hwtracing/coresight/Kconfig                 |   3
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c   |  33
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 317
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c       | 388
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h       |  81
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c      |  48
-rw-r--r--  drivers/hwtracing/coresight/coresight-platform.c    |  11
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h        |  12
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c  |  39
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c     |  49
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c     |  93
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c         |  40
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h         |  11
-rw-r--r--  drivers/hwtracing/coresight/coresight.c             |  55
14 files changed, 987 insertions, 193 deletions
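
A recurring theme in the hunks below (funnel, replicator, TMC-ETF, and the matching simplification in coresight.c) is moving the per-port reference counting out of coresight_enable_link()/coresight_disable_link() and into the individual link drivers, so the hardware is only programmed on the first enable and the last disable of a port. The following is a minimal userspace model of that pattern, not the kernel code itself; the names (port_refcnt, hw_enable, hw_disable) are illustrative stand-ins and the kernel versions additionally hold a spinlock and handle enable failures.

/*
 * Minimal model of first-enable / last-disable refcounting, as adopted
 * by the funnel, replicator and TMC-ETF hunks below. Illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_refcnt;		/* per-port reference count, zero-initialised */

static void hw_enable(void)  { puts("programming hardware: enable");  }
static void hw_disable(void) { puts("programming hardware: disable"); }

static void port_enable(void)
{
	/* Only the first user actually touches the hardware. */
	if (atomic_fetch_add(&port_refcnt, 1) == 0)
		hw_enable();
}

static void port_disable(void)
{
	/* Only the last user actually touches the hardware. */
	if (atomic_fetch_sub(&port_refcnt, 1) == 1)
		hw_disable();
}

int main(void)
{
	port_enable();	/* programs the hardware */
	port_enable();	/* refcount only */
	port_disable();	/* refcount only */
	port_disable();	/* programs the hardware */
	return 0;
}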
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 14638db4991d..6ff30e25af55 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -4,6 +4,7 @@ # menuconfig CORESIGHT bool "CoreSight Tracing Support" + depends on ARM || ARM64 depends on OF || ACPI select ARM_AMBA select PERF_EVENTS @@ -106,7 +107,7 @@ config CORESIGHT_CPU_DEBUG can quickly get to know program counter (PC), secure state, exception level, etc. Before use debugging functionality, platform needs to ensure the clock domain and power domain are enabled - properly, please refer Documentation/trace/coresight-cpu-debug.txt + properly, please refer Documentation/trace/coresight-cpu-debug.rst for detailed description and the example for usage. endif diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 2463aa7ab4f6..96544b348c27 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -646,24 +646,23 @@ static int debug_remove(struct amba_device *adev) return 0; } +static const struct amba_cs_uci_id uci_id_debug[] = { + { + /* CPU Debug UCI data */ + .devarch = 0x47706a15, + .devarch_mask = 0xfff0ffff, + .devtype = 0x00000015, + } +}; + static const struct amba_id debug_ids[] = { - { /* Debug for Cortex-A53 */ - .id = 0x000bbd03, - .mask = 0x000fffff, - }, - { /* Debug for Cortex-A57 */ - .id = 0x000bbd07, - .mask = 0x000fffff, - }, - { /* Debug for Cortex-A72 */ - .id = 0x000bbd08, - .mask = 0x000fffff, - }, - { /* Debug for Cortex-A73 */ - .id = 0x000bbd09, - .mask = 0x000fffff, - }, - { 0, 0 }, + CS_AMBA_ID(0x000bbd03), /* Cortex-A53 */ + CS_AMBA_ID(0x000bbd07), /* Cortex-A57 */ + CS_AMBA_ID(0x000bbd08), /* Cortex-A72 */ + CS_AMBA_ID(0x000bbd09), /* Cortex-A73 */ + CS_AMBA_UCI_ID(0x000f0205, uci_id_debug), /* Qualcomm Kryo */ + CS_AMBA_UCI_ID(0x000f0211, uci_id_debug), /* Qualcomm Kryo */ + {}, }; static struct amba_driver debug_driver = { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index a0365e23678e..ce41482431f9 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -217,6 +217,7 @@ static ssize_t reset_store(struct device *dev, /* No start-stop filtering for ViewInst */ config->vissctlr = 0x0; + config->vipcssctlr = 0x0; /* Disable seq events */ for (i = 0; i < drvdata->nrseqstate-1; i++) @@ -238,6 +239,7 @@ static ssize_t reset_store(struct device *dev, for (i = 0; i < drvdata->nr_resource; i++) config->res_ctrl[i] = 0x0; + config->ss_idx = 0x0; for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_ctrl[i] = 0x0; config->ss_pe_cmp[i] = 0x0; @@ -297,11 +299,6 @@ static ssize_t mode_store(struct device *dev, spin_lock(&drvdata->spinlock); config->mode = val & ETMv4_MODE_ALL; - if (config->mode & ETM_MODE_EXCLUDE) - etm4_set_mode_exclude(drvdata, true); - else - etm4_set_mode_exclude(drvdata, false); - if (drvdata->instrp0 == true) { /* start by clearing instruction P0 field */ config->cfg &= ~(BIT(1) | BIT(2)); @@ -655,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; + + /* mask off max threshold before checking min value */ + val &= ETM_CYC_THRESHOLD_MASK; if (val < drvdata->ccitmin) return -EINVAL; - config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; + config->ccctlr = val; return size; } static 
DEVICE_ATTR_RW(cyc_threshold); @@ -689,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev, return -EINVAL; if (!drvdata->nr_addr_cmp) return -EINVAL; + /* - * Bit[7:0] selects which address range comparator is used for - * branch broadcast control. + * Bit[8] controls include(1) / exclude(0), bits[0-7] select + * individual range comparators. If include then at least 1 + * range must be selected. */ - if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) + if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0)) return -EINVAL; - config->bb_ctrl = val; + config->bb_ctrl = val & GENMASK(8, 0); return size; } static DEVICE_ATTR_RW(bb_ctrl); @@ -741,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - val = BMVAL(config->vinst_ctrl, 16, 19); + val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16; return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -757,8 +759,8 @@ static ssize_t s_exlevel_vinst_store(struct device *dev, return -EINVAL; spin_lock(&drvdata->spinlock); - /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ - config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); + /* clear all EXLEVEL_S bits */ + config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK); /* enable instruction tracing for corresponding exception level */ val &= drvdata->s_ex_level; config->vinst_ctrl |= (val << 16); @@ -776,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev, struct etmv4_config *config = &drvdata->config; /* EXLEVEL_NS, bits[23:20] */ - val = BMVAL(config->vinst_ctrl, 20, 23); + val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20; return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -792,8 +794,8 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev, return -EINVAL; spin_lock(&drvdata->spinlock); - /* clear EXLEVEL_NS bits (bit[23] is never implemented */ - config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); + /* clear EXLEVEL_NS bits */ + config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK); /* enable instruction tracing for corresponding exception level */ val &= drvdata->ns_ex_level; config->vinst_ctrl |= (val << 20); @@ -969,8 +971,12 @@ static ssize_t addr_range_store(struct device *dev, unsigned long val1, val2; struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; + int elements, exclude; + + elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude); - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + /* exclude is optional, but need at least two parameter */ + if (elements < 2) return -EINVAL; /* lower address comparator cannot have a higher address value */ if (val1 > val2) @@ -998,11 +1004,11 @@ static ssize_t addr_range_store(struct device *dev, /* * Program include or exclude control bits for vinst or vdata * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE + * use supplied value, or default to bit set in 'mode' */ - if (config->mode & ETM_MODE_EXCLUDE) - etm4_set_mode_exclude(drvdata, true); - else - etm4_set_mode_exclude(drvdata, false); + if (elements != 3) + exclude = config->mode & ETM_MODE_EXCLUDE; + etm4_set_mode_exclude(drvdata, exclude ? 
true : false); spin_unlock(&drvdata->spinlock); return size; @@ -1059,8 +1065,6 @@ static ssize_t addr_start_store(struct device *dev, config->addr_val[idx] = (u64)val; config->addr_type[idx] = ETM_ADDR_TYPE_START; config->vissctlr |= BIT(idx); - /* SSSTATUS, bit[9] - turn on start/stop logic */ - config->vinst_ctrl |= BIT(9); spin_unlock(&drvdata->spinlock); return size; } @@ -1116,8 +1120,6 @@ static ssize_t addr_stop_store(struct device *dev, config->addr_val[idx] = (u64)val; config->addr_type[idx] = ETM_ADDR_TYPE_STOP; config->vissctlr |= BIT(idx + 16); - /* SSSTATUS, bit[9] - turn on start/stop logic */ - config->vinst_ctrl |= BIT(9); spin_unlock(&drvdata->spinlock); return size; } @@ -1233,6 +1235,131 @@ static ssize_t addr_context_store(struct device *dev, } static DEVICE_ATTR_RW(addr_context); +static ssize_t addr_exlevel_s_ns_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + val = BMVAL(config->addr_acc[idx], 8, 14); + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_exlevel_s_ns_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val & ~((GENMASK(14, 8) >> 8))) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */ + config->addr_acc[idx] &= ~(GENMASK(14, 8)); + config->addr_acc[idx] |= (val << 8); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_exlevel_s_ns); + +static const char * const addr_type_names[] = { + "unused", + "single", + "range", + "start", + "stop" +}; + +static ssize_t addr_cmp_view_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 idx, addr_type; + unsigned long addr_v, addr_v2, addr_ctrl; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + int size = 0; + bool exclude = false; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + addr_v = config->addr_val[idx]; + addr_ctrl = config->addr_acc[idx]; + addr_type = config->addr_type[idx]; + if (addr_type == ETM_ADDR_TYPE_RANGE) { + if (idx & 0x1) { + idx -= 1; + addr_v2 = addr_v; + addr_v = config->addr_val[idx]; + } else { + addr_v2 = config->addr_val[idx + 1]; + } + exclude = config->viiectlr & BIT(idx / 2 + 16); + } + spin_unlock(&drvdata->spinlock); + if (addr_type) { + size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx, + addr_type_names[addr_type], addr_v); + if (addr_type == ETM_ADDR_TYPE_RANGE) { + size += scnprintf(buf + size, PAGE_SIZE - size, + " %#lx %s", addr_v2, + exclude ? 
"exclude" : "include"); + } + size += scnprintf(buf + size, PAGE_SIZE - size, + " ctrl(%#lx)\n", addr_ctrl); + } else { + size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx); + } + return size; +} +static DEVICE_ATTR_RO(addr_cmp_view); + +static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (!drvdata->nr_pe_cmp) + return -EINVAL; + val = config->vipcssctlr; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (!drvdata->nr_pe_cmp) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + config->vipcssctlr = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop); + static ssize_t seq_idx_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1329,8 +1456,8 @@ static ssize_t seq_event_store(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->seq_idx; - /* RST, bits[7:0] */ - config->seq_ctrl[idx] = val & 0xFF; + /* Seq control has two masks B[15:8] F[7:0] */ + config->seq_ctrl[idx] = val & 0xFFFF; spin_unlock(&drvdata->spinlock); return size; } @@ -1585,12 +1712,129 @@ static ssize_t res_ctrl_store(struct device *dev, if (idx % 2 != 0) /* PAIRINV, bit[21] */ val &= ~BIT(21); - config->res_ctrl[idx] = val; + config->res_ctrl[idx] = val & GENMASK(21, 0); spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(res_ctrl); +static ssize_t sshot_idx_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ss_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t sshot_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nr_ss_cmp) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + config->ss_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(sshot_idx); + +static ssize_t sshot_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->ss_ctrl[config->ss_idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t sshot_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->ss_idx; + config->ss_ctrl[idx] = val & GENMASK(24, 0); + /* must clear bit 31 in related status 
register on programming */ + config->ss_status[idx] &= ~BIT(31); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(sshot_ctrl); + +static ssize_t sshot_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->ss_status[config->ss_idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(sshot_status); + +static ssize_t sshot_pe_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->ss_pe_cmp[config->ss_idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t sshot_pe_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->ss_idx; + config->ss_pe_cmp[idx] = val & GENMASK(7, 0); + /* must clear bit 31 in related status register on programming */ + config->ss_status[idx] &= ~BIT(31); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(sshot_pe_ctrl); + static ssize_t ctxid_idx_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1719,6 +1963,7 @@ static ssize_t ctxid_masks_store(struct device *dev, unsigned long val1, val2, mask; struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; + int nr_inputs; /* * Don't use contextID tracing if coming from a PID namespace. See @@ -1734,7 +1979,9 @@ static ssize_t ctxid_masks_store(struct device *dev, */ if (!drvdata->ctxid_size || !drvdata->numcidc) return -EINVAL; - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + /* one mask if <= 4 comparators, two for up to 8 */ + nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2); + if ((drvdata->numcidc > 4) && (nr_inputs != 2)) return -EINVAL; spin_lock(&drvdata->spinlock); @@ -1908,6 +2155,7 @@ static ssize_t vmid_masks_store(struct device *dev, unsigned long val1, val2, mask; struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; + int nr_inputs; /* * only implemented when vmid tracing is enabled, i.e. 
at least one @@ -1915,7 +2163,9 @@ static ssize_t vmid_masks_store(struct device *dev, */ if (!drvdata->vmid_size || !drvdata->numvmidc) return -EINVAL; - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + /* one mask if <= 4 comparators, two for up to 8 */ + nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2); + if ((drvdata->numvmidc > 4) && (nr_inputs != 2)) return -EINVAL; spin_lock(&drvdata->spinlock); @@ -2038,6 +2288,13 @@ static struct attribute *coresight_etmv4_attrs[] = { &dev_attr_addr_stop.attr, &dev_attr_addr_ctxtype.attr, &dev_attr_addr_context.attr, + &dev_attr_addr_exlevel_s_ns.attr, + &dev_attr_addr_cmp_view.attr, + &dev_attr_vinst_pe_cmp_start_stop.attr, + &dev_attr_sshot_idx.attr, + &dev_attr_sshot_ctrl.attr, + &dev_attr_sshot_pe_ctrl.attr, + &dev_attr_sshot_status.attr, &dev_attr_seq_idx.attr, &dev_attr_seq_state.attr, &dev_attr_seq_event.attr, diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 7bcac8896fc1..a90d757f7043 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -18,6 +18,7 @@ #include <linux/stat.h> #include <linux/clk.h> #include <linux/cpu.h> +#include <linux/cpu_pm.h> #include <linux/coresight.h> #include <linux/coresight-pmu.h> #include <linux/pm_wakeup.h> @@ -26,6 +27,7 @@ #include <linux/uaccess.h> #include <linux/perf_event.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <asm/sections.h> #include <asm/local.h> #include <asm/virt.h> @@ -34,7 +36,17 @@ #include "coresight-etm-perf.h" static int boot_enable; -module_param_named(boot_enable, boot_enable, int, S_IRUGO); +module_param(boot_enable, int, 0444); +MODULE_PARM_DESC(boot_enable, "Enable tracing on boot"); + +#define PARAM_PM_SAVE_FIRMWARE 0 /* save self-hosted state as per firmware */ +#define PARAM_PM_SAVE_NEVER 1 /* never save any state */ +#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */ + +static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE; +module_param(pm_save_enable, int, 0444); +MODULE_PARM_DESC(pm_save_enable, + "Save/restore state on power down: 1 = never, 2 = self-hosted"); /* The number of ETMv4 currently registered */ static int etm4_count; @@ -47,12 +59,20 @@ static enum cpuhp_state hp_online; static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { - /* Writing any value to ETMOSLAR unlocks the trace registers */ + /* Writing 0 to TRCOSLAR unlocks the trace registers */ writel_relaxed(0x0, drvdata->base + TRCOSLAR); drvdata->os_unlock = true; isb(); } +static void etm4_os_lock(struct etmv4_drvdata *drvdata) +{ + /* Writing 0x1 to TRCOSLAR locks the trace registers */ + writel_relaxed(0x1, drvdata->base + TRCOSLAR); + drvdata->os_unlock = false; + isb(); +} + static bool etm4_arch_supported(u8 arch) { /* Mask out the minor version number */ @@ -148,6 +168,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) drvdata->base + TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { + /* always clear status bit on restart if using single-shot */ + if (config->ss_ctrl[i] || config->ss_pe_cmp[i]) + config->ss_status[i] &= ~BIT(31); writel_relaxed(config->ss_ctrl[i], drvdata->base + TRCSSCCRn(i)); writel_relaxed(config->ss_status[i], @@ -188,6 +211,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); + /* + * As recommended by section 4.3.7 ("Synchronization when using the + * memory-mapped interface") of ARM IHI 0064D + */ + dsb(sy); + 
isb(); + done: CS_LOCK(drvdata->base); @@ -440,6 +470,9 @@ static void etm4_disable_hw(void *info) { u32 control; struct etmv4_drvdata *drvdata = info; + struct etmv4_config *config = &drvdata->config; + struct device *etm_dev = &drvdata->csdev->dev; + int i; CS_UNLOCK(drvdata->base); @@ -453,11 +486,27 @@ static void etm4_disable_hw(void *info) /* EN, bit[0] Trace unit enable bit */ control &= ~0x1; - /* make sure everything completes before disabling */ - mb(); + /* + * Make sure everything completes before disabling, as recommended + * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, + * SSTATUS") of ARM IHI 0064D + */ + dsb(sy); isb(); writel_relaxed(control, drvdata->base + TRCPRGCTLR); + /* wait for TRCSTATR.PMSTABLE to go to '1' */ + if (coresight_timeout(drvdata->base, TRCSTATR, + TRCSTATR_PMSTABLE_BIT, 1)) + dev_err(etm_dev, + "timeout while waiting for PM stable Trace Status\n"); + + /* read the status of the single shot comparators */ + for (i = 0; i < drvdata->nr_ss_cmp; i++) { + config->ss_status[i] = + readl_relaxed(drvdata->base + TRCSSCSRn(i)); + } + coresight_disclaim_device_unlocked(drvdata->base); CS_LOCK(drvdata->base); @@ -564,6 +613,7 @@ static void etm4_init_arch_data(void *info) u32 etmidr4; u32 etmidr5; struct etmv4_drvdata *drvdata = info; + int i; /* Make sure all registers are accessible */ etm4_os_unlock(drvdata); @@ -617,6 +667,7 @@ static void etm4_init_arch_data(void *info) * TRCARCHMAJ, bits[11:8] architecture major versin number */ drvdata->arch = BMVAL(etmidr1, 4, 11); + drvdata->config.arch = drvdata->arch; /* maximum size of resources */ etmidr2 = readl_relaxed(drvdata->base + TRCIDR2); @@ -686,9 +737,14 @@ static void etm4_init_arch_data(void *info) drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1; /* * NUMSSCC, bits[23:20] the number of single-shot - * comparator control for tracing + * comparator control for tracing. Read any status regs as these + * also contain RO capability data. */ drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23); + for (i = 0; i < drvdata->nr_ss_cmp; i++) { + drvdata->config.ss_status[i] = + readl_relaxed(drvdata->base + TRCSSCSRn(i)); + } /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */ drvdata->numcidc = BMVAL(etmidr4, 24, 27); /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */ @@ -768,6 +824,7 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config) static u64 etm4_get_access_type(struct etmv4_config *config) { u64 access_type = etm4_get_ns_access_type(config); + u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? 
ETM_EXLEVEL_S_HYP : 0; /* * EXLEVEL_S, bits[11:8], don't trace anything happening @@ -775,7 +832,8 @@ static u64 etm4_get_access_type(struct etmv4_config *config) */ access_type |= (ETM_EXLEVEL_S_APP | ETM_EXLEVEL_S_OS | - ETM_EXLEVEL_S_HYP); + s_hyp | + ETM_EXLEVEL_S_MON); return access_type; } @@ -853,6 +911,7 @@ static void etm4_set_default_filter(struct etmv4_config *config) * in the started state */ config->vinst_ctrl |= BIT(9); + config->mode |= ETM_MODE_VIEWINST_STARTSTOP; /* No start-stop filtering for ViewInst */ config->vissctlr = 0x0; @@ -1047,10 +1106,8 @@ static int etm4_starting_cpu(unsigned int cpu) return 0; spin_lock(&etmdrvdata[cpu]->spinlock); - if (!etmdrvdata[cpu]->os_unlock) { + if (!etmdrvdata[cpu]->os_unlock) etm4_os_unlock(etmdrvdata[cpu]); - etmdrvdata[cpu]->os_unlock = true; - } if (local_read(&etmdrvdata[cpu]->mode)) etm4_enable_hw(etmdrvdata[cpu]); @@ -1075,6 +1132,287 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) drvdata->trcid = coresight_get_trace_id(drvdata->cpu); } +static int etm4_cpu_save(struct etmv4_drvdata *drvdata) +{ + int i, ret = 0; + struct etmv4_save_state *state; + struct device *etm_dev = &drvdata->csdev->dev; + + /* + * As recommended by 3.4.1 ("The procedure when powering down the PE") + * of ARM IHI 0064D + */ + dsb(sy); + isb(); + + CS_UNLOCK(drvdata->base); + + /* Lock the OS lock to disable trace and external debugger access */ + etm4_os_lock(drvdata); + + /* wait for TRCSTATR.PMSTABLE to go up */ + if (coresight_timeout(drvdata->base, TRCSTATR, + TRCSTATR_PMSTABLE_BIT, 1)) { + dev_err(etm_dev, + "timeout while waiting for PM Stable Status\n"); + etm4_os_unlock(drvdata); + ret = -EBUSY; + goto out; + } + + state = drvdata->save_state; + + state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR); + state->trcprocselr = readl(drvdata->base + TRCPROCSELR); + state->trcconfigr = readl(drvdata->base + TRCCONFIGR); + state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR); + state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R); + state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R); + state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR); + state->trctsctlr = readl(drvdata->base + TRCTSCTLR); + state->trcsyncpr = readl(drvdata->base + TRCSYNCPR); + state->trcccctlr = readl(drvdata->base + TRCCCCTLR); + state->trcbbctlr = readl(drvdata->base + TRCBBCTLR); + state->trctraceidr = readl(drvdata->base + TRCTRACEIDR); + state->trcqctlr = readl(drvdata->base + TRCQCTLR); + + state->trcvictlr = readl(drvdata->base + TRCVICTLR); + state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR); + state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR); + state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR); + state->trcvdctlr = readl(drvdata->base + TRCVDCTLR); + state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR); + state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR); + + for (i = 0; i < drvdata->nrseqstate; i++) + state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i)); + + state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR); + state->trcseqstr = readl(drvdata->base + TRCSEQSTR); + state->trcextinselr = readl(drvdata->base + TRCEXTINSELR); + + for (i = 0; i < drvdata->nr_cntr; i++) { + state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i)); + state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i)); + state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i)); + } + + for (i = 0; i < drvdata->nr_resource * 2; i++) + state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i)); + + for (i = 
0; i < drvdata->nr_ss_cmp; i++) { + state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i)); + state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i)); + state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i)); + } + + for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { + state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i)); + state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i)); + } + + /* + * Data trace stream is architecturally prohibited for A profile cores + * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per + * section 1.3.4 ("Possible functional configurations of an ETMv4 trace + * unit") of ARM IHI 0064D. + */ + + for (i = 0; i < drvdata->numcidc; i++) + state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i)); + + for (i = 0; i < drvdata->numvmidc; i++) + state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i)); + + state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0); + state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1); + + state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0); + state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1); + + state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR); + + state->trcpdcr = readl(drvdata->base + TRCPDCR); + + /* wait for TRCSTATR.IDLE to go up */ + if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) { + dev_err(etm_dev, + "timeout while waiting for Idle Trace Status\n"); + etm4_os_unlock(drvdata); + ret = -EBUSY; + goto out; + } + + drvdata->state_needs_restore = true; + + /* + * Power can be removed from the trace unit now. We do this to + * potentially save power on systems that respect the TRCPDCR_PU + * despite requesting software to save/restore state. + */ + writel_relaxed((state->trcpdcr & ~TRCPDCR_PU), + drvdata->base + TRCPDCR); + +out: + CS_LOCK(drvdata->base); + return ret; +} + +static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) +{ + int i; + struct etmv4_save_state *state = drvdata->save_state; + + CS_UNLOCK(drvdata->base); + + writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET); + + writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR); + writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR); + writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR); + writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR); + writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R); + writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R); + writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR); + writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR); + writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR); + writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR); + writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR); + writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR); + writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR); + + writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR); + writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR); + writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR); + writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR); + writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR); + writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR); + writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR); + + for (i = 0; i < drvdata->nrseqstate; i++) + writel_relaxed(state->trcseqevr[i], + drvdata->base + TRCSEQEVRn(i)); + + 
writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR); + writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR); + writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR); + + for (i = 0; i < drvdata->nr_cntr; i++) { + writel_relaxed(state->trccntrldvr[i], + drvdata->base + TRCCNTRLDVRn(i)); + writel_relaxed(state->trccntctlr[i], + drvdata->base + TRCCNTCTLRn(i)); + writel_relaxed(state->trccntvr[i], + drvdata->base + TRCCNTVRn(i)); + } + + for (i = 0; i < drvdata->nr_resource * 2; i++) + writel_relaxed(state->trcrsctlr[i], + drvdata->base + TRCRSCTLRn(i)); + + for (i = 0; i < drvdata->nr_ss_cmp; i++) { + writel_relaxed(state->trcssccr[i], + drvdata->base + TRCSSCCRn(i)); + writel_relaxed(state->trcsscsr[i], + drvdata->base + TRCSSCSRn(i)); + writel_relaxed(state->trcsspcicr[i], + drvdata->base + TRCSSPCICRn(i)); + } + + for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { + writel_relaxed(state->trcacvr[i], + drvdata->base + TRCACVRn(i)); + writel_relaxed(state->trcacatr[i], + drvdata->base + TRCACATRn(i)); + } + + for (i = 0; i < drvdata->numcidc; i++) + writel_relaxed(state->trccidcvr[i], + drvdata->base + TRCCIDCVRn(i)); + + for (i = 0; i < drvdata->numvmidc; i++) + writel_relaxed(state->trcvmidcvr[i], + drvdata->base + TRCVMIDCVRn(i)); + + writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0); + writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1); + + writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0); + writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1); + + writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET); + + writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR); + + drvdata->state_needs_restore = false; + + /* + * As recommended by section 4.3.7 ("Synchronization when using the + * memory-mapped interface") of ARM IHI 0064D + */ + dsb(sy); + isb(); + + /* Unlock the OS lock to re-enable trace and external debug access */ + etm4_os_unlock(drvdata); + CS_LOCK(drvdata->base); +} + +static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, + void *v) +{ + struct etmv4_drvdata *drvdata; + unsigned int cpu = smp_processor_id(); + + if (!etmdrvdata[cpu]) + return NOTIFY_OK; + + drvdata = etmdrvdata[cpu]; + + if (!drvdata->save_state) + return NOTIFY_OK; + + if (WARN_ON_ONCE(drvdata->cpu != cpu)) + return NOTIFY_BAD; + + switch (cmd) { + case CPU_PM_ENTER: + /* save the state if self-hosted coresight is in use */ + if (local_read(&drvdata->mode)) + if (etm4_cpu_save(drvdata)) + return NOTIFY_BAD; + break; + case CPU_PM_EXIT: + /* fallthrough */ + case CPU_PM_ENTER_FAILED: + if (drvdata->state_needs_restore) + etm4_cpu_restore(drvdata); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block etm4_cpu_pm_nb = { + .notifier_call = etm4_cpu_pm_notify, +}; + +static int etm4_cpu_pm_register(void) +{ + if (IS_ENABLED(CONFIG_CPU_PM)) + return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + + return 0; +} + +static void etm4_cpu_pm_unregister(void) +{ + if (IS_ENABLED(CONFIG_CPU_PM)) + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); +} + static int etm4_probe(struct amba_device *adev, const struct amba_id *id) { int ret; @@ -1091,6 +1429,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) dev_set_drvdata(dev, drvdata); + if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE) + pm_save_enable = coresight_loses_context_with_cpu(dev) ? 
+ PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER; + + if (pm_save_enable != PARAM_PM_SAVE_NEVER) { + drvdata->save_state = devm_kmalloc(dev, + sizeof(struct etmv4_save_state), GFP_KERNEL); + if (!drvdata->save_state) + return -ENOMEM; + } + /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) @@ -1125,6 +1474,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) if (ret < 0) goto err_arch_supported; hp_online = ret; + + ret = etm4_cpu_pm_register(); + if (ret) + goto err_arch_supported; } cpus_read_unlock(); @@ -1175,6 +1528,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) err_arch_supported: if (--etm4_count == 0) { + etm4_cpu_pm_unregister(); + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); if (hp_online) cpuhp_remove_state_nocalls(hp_online); @@ -1192,11 +1547,16 @@ static struct amba_cs_uci_id uci_id_etm4[] = { }; static const struct amba_id etm4_ids[] = { - CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */ - CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */ - CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */ - CS_AMBA_ID(0x000bb959), /* Cortex-A73 */ - CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */ + CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */ + CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */ + CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */ + CS_AMBA_ID(0x000bb959), /* Cortex-A73 */ + CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */ + CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */ + CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */ + CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */ + CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */ + CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */ {}, }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 4523f10ddd0f..4a695bf90582 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -175,22 +175,28 @@ ETM_MODE_EXCL_USER) #define TRCSTATR_IDLE_BIT 0 +#define TRCSTATR_PMSTABLE_BIT 1 #define ETM_DEFAULT_ADDR_COMP 0 /* PowerDown Control Register bits */ #define TRCPDCR_PU BIT(3) -/* secure state access levels */ +/* secure state access levels - TRCACATRn */ #define ETM_EXLEVEL_S_APP BIT(8) #define ETM_EXLEVEL_S_OS BIT(9) -#define ETM_EXLEVEL_S_NA BIT(10) -#define ETM_EXLEVEL_S_HYP BIT(11) -/* non-secure state access levels */ +#define ETM_EXLEVEL_S_HYP BIT(10) +#define ETM_EXLEVEL_S_MON BIT(11) +/* non-secure state access levels - TRCACATRn */ #define ETM_EXLEVEL_NS_APP BIT(12) #define ETM_EXLEVEL_NS_OS BIT(13) #define ETM_EXLEVEL_NS_HYP BIT(14) #define ETM_EXLEVEL_NS_NA BIT(15) +/* secure / non secure masks - TRCVICTLR, IDR3 */ +#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16) +/* NS MON (EL3) mode never implemented */ +#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20) + /** * struct etmv4_config - configuration information related to an ETMv4 * @mode: Controls various modes supported by this ETM. @@ -221,6 +227,7 @@ * @cntr_val: Sets or returns the value for a counter. * @res_idx: Resource index selector. * @res_ctrl: Controls the selection of the resources in the trace unit. + * @ss_idx: Single-shot index selector. * @ss_ctrl: Controls the corresponding single-shot comparator resource. * @ss_status: The status of the corresponding single-shot comparator. * @ss_pe_cmp: Selects the PE comparator inputs for Single-shot control. 
@@ -237,6 +244,7 @@ * @vmid_mask0: VM ID comparator mask for comparator 0-3. * @vmid_mask1: VM ID comparator mask for comparator 4-7. * @ext_inp: External input selection. + * @arch: ETM architecture version (for arch dependent config). */ struct etmv4_config { u32 mode; @@ -263,6 +271,7 @@ struct etmv4_config { u32 cntr_val[ETMv4_MAX_CNTR]; u8 res_idx; u32 res_ctrl[ETM_MAX_RES_SEL]; + u8 ss_idx; u32 ss_ctrl[ETM_MAX_SS_CMP]; u32 ss_status[ETM_MAX_SS_CMP]; u32 ss_pe_cmp[ETM_MAX_SS_CMP]; @@ -279,6 +288,66 @@ struct etmv4_config { u32 vmid_mask0; u32 vmid_mask1; u32 ext_inp; + u8 arch; +}; + +/** + * struct etm4_save_state - state to be preserved when ETM is without power + */ +struct etmv4_save_state { + u32 trcprgctlr; + u32 trcprocselr; + u32 trcconfigr; + u32 trcauxctlr; + u32 trceventctl0r; + u32 trceventctl1r; + u32 trcstallctlr; + u32 trctsctlr; + u32 trcsyncpr; + u32 trcccctlr; + u32 trcbbctlr; + u32 trctraceidr; + u32 trcqctlr; + + u32 trcvictlr; + u32 trcviiectlr; + u32 trcvissctlr; + u32 trcvipcssctlr; + u32 trcvdctlr; + u32 trcvdsacctlr; + u32 trcvdarcctlr; + + u32 trcseqevr[ETM_MAX_SEQ_STATES]; + u32 trcseqrstevr; + u32 trcseqstr; + u32 trcextinselr; + u32 trccntrldvr[ETMv4_MAX_CNTR]; + u32 trccntctlr[ETMv4_MAX_CNTR]; + u32 trccntvr[ETMv4_MAX_CNTR]; + + u32 trcrsctlr[ETM_MAX_RES_SEL * 2]; + + u32 trcssccr[ETM_MAX_SS_CMP]; + u32 trcsscsr[ETM_MAX_SS_CMP]; + u32 trcsspcicr[ETM_MAX_SS_CMP]; + + u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP]; + u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP]; + u64 trccidcvr[ETMv4_MAX_CTXID_CMP]; + u32 trcvmidcvr[ETM_MAX_VMID_CMP]; + u32 trccidcctlr0; + u32 trccidcctlr1; + u32 trcvmidcctlr0; + u32 trcvmidcctlr1; + + u32 trcclaimset; + + u32 cntr_val[ETMv4_MAX_CNTR]; + u32 seq_state; + u32 vinst_ctrl; + u32 ss_status[ETM_MAX_SS_CMP]; + + u32 trcpdcr; }; /** @@ -336,6 +405,8 @@ struct etmv4_config { * @atbtrig: If the implementation can support ATB triggers * @lpoverride: If the implementation can support low-power state over. * @config: structure holding configuration parameters. + * @save_state: State to be preserved across power loss + * @state_needs_restore: True when there is context to restore after PM exit */ struct etmv4_drvdata { void __iomem *base; @@ -381,6 +452,8 @@ struct etmv4_drvdata { bool atbtrig; bool lpoverride; struct etmv4_config config; + struct etmv4_save_state *save_state; + bool state_needs_restore; }; /* Address comparator access types */ diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index fa97cb9ab4f9..900690a9f7f0 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -5,6 +5,7 @@ * Description: CoreSight Funnel driver */ +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> @@ -37,12 +38,14 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel"); * @atclk: optional clock for the core parts of the funnel. * @csdev: component vitals needed by the framework. * @priority: port selection order. + * @spinlock: serialize enable/disable operations. 
*/ struct funnel_drvdata { void __iomem *base; struct clk *atclk; struct coresight_device *csdev; unsigned long priority; + spinlock_t spinlock; }; static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port) @@ -75,11 +78,21 @@ static int funnel_enable(struct coresight_device *csdev, int inport, { int rc = 0; struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - if (drvdata->base) - rc = dynamic_funnel_enable_hw(drvdata, inport); - + unsigned long flags; + bool first_enable = false; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_read(&csdev->refcnt[inport]) == 0) { + if (drvdata->base) + rc = dynamic_funnel_enable_hw(drvdata, inport); + if (!rc) + first_enable = true; + } if (!rc) + atomic_inc(&csdev->refcnt[inport]); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + if (first_enable) dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport); return rc; } @@ -106,11 +119,19 @@ static void funnel_disable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + unsigned long flags; + bool last_disable = false; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_dec_return(&csdev->refcnt[inport]) == 0) { + if (drvdata->base) + dynamic_funnel_disable_hw(drvdata, inport); + last_disable = true; + } + spin_unlock_irqrestore(&drvdata->spinlock, flags); - if (drvdata->base) - dynamic_funnel_disable_hw(drvdata, inport); - - dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport); + if (last_disable) + dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport); } static const struct coresight_ops_link funnel_link_ops = { @@ -192,7 +213,7 @@ static int funnel_probe(struct device *dev, struct resource *res) if (is_of_node(dev_fwnode(dev)) && of_device_is_compatible(dev->of_node, "arm,coresight-funnel")) - pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n"); + dev_warn_once(dev, "Uses OBSOLETE CoreSight funnel binding\n"); desc.name = coresight_alloc_device_name(&funnel_devs, dev); if (!desc.name) @@ -232,6 +253,7 @@ static int funnel_probe(struct device *dev, struct resource *res) } dev->platform_data = pdata; + spin_lock_init(&drvdata->spinlock); desc.type = CORESIGHT_DEV_TYPE_LINK; desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG; desc.ops = &funnel_cs_ops; @@ -302,11 +324,19 @@ static const struct of_device_id static_funnel_match[] = { {} }; +#ifdef CONFIG_ACPI +static const struct acpi_device_id static_funnel_ids[] = { + {"ARMHC9FE", 0}, + {}, +}; +#endif + static struct platform_driver static_funnel_driver = { .probe = static_funnel_probe, .driver = { .name = "coresight-static-funnel", .of_match_table = static_funnel_match, + .acpi_match_table = ACPI_PTR(static_funnel_ids), .pm = &funnel_dev_pm_ops, .suppress_bind_attrs = true, }, diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c index dad7d96c5943..3c5bee429105 100644 --- a/drivers/hwtracing/coresight/coresight-platform.c +++ b/drivers/hwtracing/coresight/coresight-platform.c @@ -37,11 +37,6 @@ static int coresight_alloc_conns(struct device *dev, return 0; } -int coresight_device_fwnode_match(struct device *dev, const void *fwnode) -{ - return dev_fwnode(dev) == fwnode; -} - static struct device * coresight_find_device_by_fwnode(struct fwnode_handle *fwnode) { @@ -51,8 +46,7 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode) * If we have a non-configurable replicator, it will be found on the * platform bus. 
*/ - dev = bus_find_device(&platform_bus_type, NULL, - fwnode, coresight_device_fwnode_match); + dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); if (dev) return dev; @@ -60,8 +54,7 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode) * We have a configurable component - circle through the AMBA bus * looking for the device that matches the endpoint node. */ - return bus_find_device(&amba_bustype, NULL, - fwnode, coresight_device_fwnode_match); + return bus_find_device_by_fwnode(&amba_bustype, fwnode); } #ifdef CONFIG_OF diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 7d401790dd7e..82e563cdc879 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -185,11 +185,11 @@ static inline int etm_writel_cp14(u32 off, u32 val) { return 0; } } /* coresight AMBA ID, full UCI structure: id table entry. */ -#define CS_AMBA_UCI_ID(pid, uci_ptr) \ - { \ - .id = pid, \ - .mask = 0x000fffff, \ - .data = uci_ptr \ +#define CS_AMBA_UCI_ID(pid, uci_ptr) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + .data = (void *)uci_ptr \ } /* extract the data value from a UCI structure given amba_id pointer. */ @@ -202,6 +202,4 @@ static inline void *coresight_get_uci_data(const struct amba_id *id) void coresight_release_platform_data(struct coresight_platform_data *pdata); -int coresight_device_fwnode_match(struct device *dev, const void *fwnode); - #endif diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index b7d6d59d56db..e7dc1c31d20d 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -31,11 +31,13 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator"); * whether this one is programmable or not. * @atclk: optional clock for the core parts of the replicator. * @csdev: component vitals needed by the framework + * @spinlock: serialize enable/disable operations. 
*/ struct replicator_drvdata { void __iomem *base; struct clk *atclk; struct coresight_device *csdev; + spinlock_t spinlock; }; static void dynamic_replicator_reset(struct replicator_drvdata *drvdata) @@ -97,10 +99,22 @@ static int replicator_enable(struct coresight_device *csdev, int inport, { int rc = 0; struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - if (drvdata->base) - rc = dynamic_replicator_enable(drvdata, inport, outport); + unsigned long flags; + bool first_enable = false; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_read(&csdev->refcnt[outport]) == 0) { + if (drvdata->base) + rc = dynamic_replicator_enable(drvdata, inport, + outport); + if (!rc) + first_enable = true; + } if (!rc) + atomic_inc(&csdev->refcnt[outport]); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + if (first_enable) dev_dbg(&csdev->dev, "REPLICATOR enabled\n"); return rc; } @@ -137,10 +151,19 @@ static void replicator_disable(struct coresight_device *csdev, int inport, int outport) { struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + unsigned long flags; + bool last_disable = false; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (atomic_dec_return(&csdev->refcnt[outport]) == 0) { + if (drvdata->base) + dynamic_replicator_disable(drvdata, inport, outport); + last_disable = true; + } + spin_unlock_irqrestore(&drvdata->spinlock, flags); - if (drvdata->base) - dynamic_replicator_disable(drvdata, inport, outport); - dev_dbg(&csdev->dev, "REPLICATOR disabled\n"); + if (last_disable) + dev_dbg(&csdev->dev, "REPLICATOR disabled\n"); } static const struct coresight_ops_link replicator_link_ops = { @@ -184,7 +207,8 @@ static int replicator_probe(struct device *dev, struct resource *res) if (is_of_node(dev_fwnode(dev)) && of_device_is_compatible(dev->of_node, "arm,coresight-replicator")) - pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n"); + dev_warn_once(dev, + "Uses OBSOLETE CoreSight replicator binding\n"); desc.name = coresight_alloc_device_name(&replicator_devs, dev); if (!desc.name) @@ -224,6 +248,7 @@ static int replicator_probe(struct device *dev, struct resource *res) } dev->platform_data = pdata; + spin_lock_init(&drvdata->spinlock); desc.type = CORESIGHT_DEV_TYPE_LINK; desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT; desc.ops = &replicator_cs_ops; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 23b7ff00af5c..d0cc3985b72a 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -334,9 +334,10 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev) static int tmc_enable_etf_link(struct coresight_device *csdev, int inport, int outport) { - int ret; + int ret = 0; unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + bool first_enable = false; spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { @@ -344,12 +345,18 @@ static int tmc_enable_etf_link(struct coresight_device *csdev, return -EBUSY; } - ret = tmc_etf_enable_hw(drvdata); + if (atomic_read(&csdev->refcnt[0]) == 0) { + ret = tmc_etf_enable_hw(drvdata); + if (!ret) { + drvdata->mode = CS_MODE_SYSFS; + first_enable = true; + } + } if (!ret) - drvdata->mode = CS_MODE_SYSFS; + atomic_inc(&csdev->refcnt[0]); spin_unlock_irqrestore(&drvdata->spinlock, flags); - if (!ret) + if (first_enable) dev_dbg(&csdev->dev, "TMC-ETF enabled\n"); return ret; } @@ -359,6 +366,7 
@@ static void tmc_disable_etf_link(struct coresight_device *csdev, { unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + bool last_disable = false; spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { @@ -366,11 +374,15 @@ static void tmc_disable_etf_link(struct coresight_device *csdev, return; } - tmc_etf_disable_hw(drvdata); - drvdata->mode = CS_MODE_DISABLED; + if (atomic_dec_return(&csdev->refcnt[0]) == 0) { + tmc_etf_disable_hw(drvdata); + drvdata->mode = CS_MODE_DISABLED; + last_disable = true; + } spin_unlock_irqrestore(&drvdata->spinlock, flags); - dev_dbg(&csdev->dev, "TMC-ETF disabled\n"); + if (last_disable) + dev_dbg(&csdev->dev, "TMC-ETF disabled\n"); } static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, @@ -479,30 +491,11 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev, * traces. */ if (!buf->snapshot && to_read > handle->size) { - u32 mask = 0; - - /* - * The value written to RRP must be byte-address aligned to - * the width of the trace memory databus _and_ to a frame - * boundary (16 byte), whichever is the biggest. For example, - * for 32-bit, 64-bit and 128-bit wide trace memory, the four - * LSBs must be 0s. For 256-bit wide trace memory, the five - * LSBs must be 0s. - */ - switch (drvdata->memwidth) { - case TMC_MEM_INTF_WIDTH_32BITS: - case TMC_MEM_INTF_WIDTH_64BITS: - case TMC_MEM_INTF_WIDTH_128BITS: - mask = GENMASK(31, 4); - break; - case TMC_MEM_INTF_WIDTH_256BITS: - mask = GENMASK(31, 5); - break; - } + u32 mask = tmc_get_memwidth_mask(drvdata); /* * Make sure the new size is aligned in accordance with the - * requirement explained above. + * requirement explained in function tmc_get_memwidth_mask(). */ to_read = handle->size & mask; /* Move the RAM read pointer up */ diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 17006705287a..625882bc8b08 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -871,6 +871,7 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata, return ERR_PTR(rc); } + refcount_set(&etr_buf->refcount, 1); dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n", (unsigned long)size >> 10, etr_buf->mode); return etr_buf; @@ -927,15 +928,24 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) rrp = tmc_read_rrp(drvdata); rwp = tmc_read_rwp(drvdata); status = readl_relaxed(drvdata->base + TMC_STS); + + /* + * If there were memory errors in the session, truncate the + * buffer. + */ + if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) { + dev_dbg(&drvdata->csdev->dev, + "tmc memory error detected, truncating buffer\n"); + etr_buf->len = 0; + etr_buf->full = 0; + return; + } + etr_buf->full = status & TMC_STS_FULL; WARN_ON(!etr_buf->ops || !etr_buf->ops->sync); etr_buf->ops->sync(etr_buf, rrp, rwp); - - /* Insert barrier packets at the beginning, if there was an overflow */ - if (etr_buf->full) - tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset); } static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) @@ -1072,6 +1082,13 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata) drvdata->sysfs_buf = NULL; } else { tmc_sync_etr_buf(drvdata); + /* + * Insert barrier packets at the beginning, if there was + * an overflow. 
+ */ + if (etr_buf->full) + tmc_etr_buf_insert_barrier_packet(etr_buf, + etr_buf->offset); } } @@ -1263,8 +1280,6 @@ retry: if (IS_ERR(etr_buf)) return etr_buf; - refcount_set(&etr_buf->refcount, 1); - /* Now that we have a buffer, add it to the IDR. */ mutex_lock(&drvdata->idr_mutex); ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); @@ -1291,19 +1306,11 @@ get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata, struct perf_event *event, int nr_pages, void **pages, bool snapshot) { - struct etr_buf *etr_buf; - /* * In per-thread mode the etr_buf isn't shared, so just go ahead * with memory allocation. */ - etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); - if (IS_ERR(etr_buf)) - goto out; - - refcount_set(&etr_buf->refcount, 1); -out: - return etr_buf; + return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); } static struct etr_buf * @@ -1410,10 +1417,12 @@ free_etr_perf_buffer: * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware * buffer to the perf ring buffer. */ -static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf) +static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf, + unsigned long src_offset, + unsigned long to_copy) { - long bytes, to_copy; - long pg_idx, pg_offset, src_offset; + long bytes; + long pg_idx, pg_offset; unsigned long head = etr_perf->head; char **dst_pages, *src_buf; struct etr_buf *etr_buf = etr_perf->etr_buf; @@ -1422,8 +1431,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf) pg_idx = head >> PAGE_SHIFT; pg_offset = head & (PAGE_SIZE - 1); dst_pages = (char **)etr_perf->pages; - src_offset = etr_buf->offset; - to_copy = etr_buf->len; while (to_copy > 0) { /* @@ -1434,6 +1441,8 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf) * 3) what is available in the destination page. * in one iteration. */ + if (src_offset >= etr_buf->size) + src_offset -= etr_buf->size; bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy, &src_buf); if (WARN_ON_ONCE(bytes <= 0)) @@ -1454,8 +1463,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf) /* Move source pointers */ src_offset += bytes; - if (src_offset >= etr_buf->size) - src_offset -= etr_buf->size; } } @@ -1471,7 +1478,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev, void *config) { bool lost = false; - unsigned long flags, size = 0; + unsigned long flags, offset, size = 0; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etr_perf_buffer *etr_perf = config; struct etr_buf *etr_buf = etr_perf->etr_buf; @@ -1484,7 +1491,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev, goto out; } - if (WARN_ON(drvdata->perf_data != etr_perf)) { + if (WARN_ON(drvdata->perf_buf != etr_buf)) { lost = true; spin_unlock_irqrestore(&drvdata->spinlock, flags); goto out; @@ -1496,12 +1503,38 @@ tmc_update_etr_buffer(struct coresight_device *csdev, tmc_sync_etr_buf(drvdata); CS_LOCK(drvdata->base); - /* Reset perf specific data */ - drvdata->perf_data = NULL; spin_unlock_irqrestore(&drvdata->spinlock, flags); + lost = etr_buf->full; + offset = etr_buf->offset; size = etr_buf->len; - tmc_etr_sync_perf_buffer(etr_perf); + + /* + * The ETR buffer may be bigger than the space available in the + * perf ring buffer (handle->size). If so advance the offset so that we + * get the latest trace data. In snapshot mode none of that matters + * since we are expected to clobber stale data in favour of the latest + * traces. 
+ */ + if (!etr_perf->snapshot && size > handle->size) { + u32 mask = tmc_get_memwidth_mask(drvdata); + + /* + * Make sure the new size is aligned in accordance with the + * requirement explained in function tmc_get_memwidth_mask(). + */ + size = handle->size & mask; + offset = etr_buf->offset + etr_buf->len - size; + + if (offset >= etr_buf->size) + offset -= etr_buf->size; + lost = true; + } + + /* Insert barrier packets at the beginning, if there was an overflow */ + if (lost) + tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset); + tmc_etr_sync_perf_buffer(etr_perf, offset, size); /* * In snapshot mode we simply increment the head by the number of byte @@ -1511,8 +1544,6 @@ tmc_update_etr_buffer(struct coresight_device *csdev, */ if (etr_perf->snapshot) handle->head += size; - - lost |= etr_buf->full; out: /* * Don't set the TRUNCATED flag in snapshot mode because 1) the @@ -1556,7 +1587,6 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) } etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf); - drvdata->perf_data = etr_perf; /* * No HW configuration is needed if the sink is already in @@ -1572,6 +1602,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) /* Associate with monitored process. */ drvdata->pid = pid; drvdata->mode = CS_MODE_PERF; + drvdata->perf_buf = etr_perf->etr_buf; atomic_inc(csdev->refcnt); } @@ -1617,6 +1648,8 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev) /* Dissociate from monitored process. */ drvdata->pid = -1; drvdata->mode = CS_MODE_DISABLED; + /* Reset perf specific data */ + drvdata->perf_buf = NULL; spin_unlock_irqrestore(&drvdata->spinlock, flags); diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index be37aff573b4..1cf82fa58289 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -70,6 +70,34 @@ void tmc_disable_hw(struct tmc_drvdata *drvdata) writel_relaxed(0x0, drvdata->base + TMC_CTL); } +u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata) +{ + u32 mask = 0; + + /* + * When moving RRP or an offset address forward, the new values must + * be byte-address aligned to the width of the trace memory databus + * _and_ to a frame boundary (16 byte), whichever is the biggest. For + * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four + * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must + * be 0s. 
+ */ + switch (drvdata->memwidth) { + case TMC_MEM_INTF_WIDTH_32BITS: + /* fallthrough */ + case TMC_MEM_INTF_WIDTH_64BITS: + /* fallthrough */ + case TMC_MEM_INTF_WIDTH_128BITS: + mask = GENMASK(31, 4); + break; + case TMC_MEM_INTF_WIDTH_256BITS: + mask = GENMASK(31, 5); + break; + } + + return mask; +} + static int tmc_read_prepare(struct tmc_drvdata *drvdata) { int ret = 0; @@ -236,6 +264,7 @@ coresight_tmc_reg(ffcr, TMC_FFCR); coresight_tmc_reg(mode, TMC_MODE); coresight_tmc_reg(pscr, TMC_PSCR); coresight_tmc_reg(axictl, TMC_AXICTL); +coresight_tmc_reg(authstatus, TMC_AUTHSTATUS); coresight_tmc_reg(devid, CORESIGHT_DEVID); coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI); coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI); @@ -255,6 +284,7 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = { &dev_attr_devid.attr, &dev_attr_dba.attr, &dev_attr_axictl.attr, + &dev_attr_authstatus.attr, NULL, }; @@ -342,6 +372,13 @@ static inline bool tmc_etr_can_use_sg(struct device *dev) return fwnode_property_present(dev->fwnode, "arm,scatter-gather"); } +static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata) +{ + u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS); + + return (auth & TMC_AUTH_NSID_MASK) == 0x3; +} + /* Detect and initialise the capabilities of a TMC ETR */ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps) { @@ -349,6 +386,9 @@ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps) u32 dma_mask = 0; struct tmc_drvdata *drvdata = dev_get_drvdata(parent); + if (!tmc_etr_has_non_secure_access(drvdata)) + return -EACCES; + /* Set the unadvertised capabilities */ tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps); diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index 1ed50411cc3c..71de978575f3 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -39,6 +39,7 @@ #define TMC_ITATBCTR2 0xef0 #define TMC_ITATBCTR1 0xef4 #define TMC_ITATBCTR0 0xef8 +#define TMC_AUTHSTATUS 0xfb8 /* register description */ /* TMC_CTL - 0x020 */ @@ -47,6 +48,7 @@ #define TMC_STS_TMCREADY_BIT 2 #define TMC_STS_FULL BIT(0) #define TMC_STS_TRIGGERED BIT(1) +#define TMC_STS_MEMERR BIT(5) /* * TMC_AXICTL - 0x110 * @@ -89,6 +91,8 @@ #define TMC_DEVID_AXIAW_SHIFT 17 #define TMC_DEVID_AXIAW_MASK 0x7f +#define TMC_AUTH_NSID_MASK GENMASK(1, 0) + enum tmc_config_type { TMC_CONFIG_TYPE_ETB, TMC_CONFIG_TYPE_ETR, @@ -178,8 +182,8 @@ struct etr_buf { * device configuration register (DEVID) * @idr: Holds etr_bufs allocated for this ETR. * @idr_mutex: Access serialisation for idr. - * @perf_data: PERF buffer for ETR. - * @sysfs_data: SYSFS buffer for ETR. + * @sysfs_buf: SYSFS buffer for ETR. + * @perf_buf: PERF buffer for ETR. 
*/ struct tmc_drvdata { void __iomem *base; @@ -202,7 +206,7 @@ struct tmc_drvdata { struct idr idr; struct mutex idr_mutex; struct etr_buf *sysfs_buf; - void *perf_data; + struct etr_buf *perf_buf; }; struct etr_buf_operations { @@ -251,6 +255,7 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata); void tmc_flush_and_stop(struct tmc_drvdata *drvdata); void tmc_enable_hw(struct tmc_drvdata *drvdata); void tmc_disable_hw(struct tmc_drvdata *drvdata); +u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata); /* ETB/ETF functions */ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata); diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 55db77f6410b..ef20f74c85fa 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -253,9 +253,9 @@ static int coresight_enable_link(struct coresight_device *csdev, struct coresight_device *parent, struct coresight_device *child) { - int ret; + int ret = 0; int link_subtype; - int refport, inport, outport; + int inport, outport; if (!parent || !child) return -EINVAL; @@ -264,29 +264,17 @@ static int coresight_enable_link(struct coresight_device *csdev, outport = coresight_find_link_outport(csdev, child); link_subtype = csdev->subtype.link_subtype; - if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) - refport = inport; - else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) - refport = outport; - else - refport = 0; - - if (refport < 0) - return refport; - - if (atomic_inc_return(&csdev->refcnt[refport]) == 1) { - if (link_ops(csdev)->enable) { - ret = link_ops(csdev)->enable(csdev, inport, outport); - if (ret) { - atomic_dec(&csdev->refcnt[refport]); - return ret; - } - } - } + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0) + return inport; + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0) + return outport; - csdev->enable = true; + if (link_ops(csdev)->enable) + ret = link_ops(csdev)->enable(csdev, inport, outport); + if (!ret) + csdev->enable = true; - return 0; + return ret; } static void coresight_disable_link(struct coresight_device *csdev, @@ -295,7 +283,7 @@ static void coresight_disable_link(struct coresight_device *csdev, { int i, nr_conns; int link_subtype; - int refport, inport, outport; + int inport, outport; if (!parent || !child) return; @@ -305,20 +293,15 @@ static void coresight_disable_link(struct coresight_device *csdev, link_subtype = csdev->subtype.link_subtype; if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { - refport = inport; nr_conns = csdev->pdata->nr_inport; } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) { - refport = outport; nr_conns = csdev->pdata->nr_outport; } else { - refport = 0; nr_conns = 1; } - if (atomic_dec_return(&csdev->refcnt[refport]) == 0) { - if (link_ops(csdev)->disable) - link_ops(csdev)->disable(csdev, inport, outport); - } + if (link_ops(csdev)->disable) + link_ops(csdev)->disable(csdev, inport, outport); for (i = 0; i < nr_conns; i++) if (atomic_read(&csdev->refcnt[i]) != 0) @@ -1046,9 +1029,7 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) struct coresight_connection *conn = &csdev->pdata->conns[i]; struct device *dev = NULL; - dev = bus_find_device(&coresight_bustype, NULL, - (void *)conn->child_fwnode, - coresight_device_fwnode_match); + dev = bus_find_device_by_fwnode(&coresight_bustype, conn->child_fwnode); if (dev) { conn->child_dev = to_coresight_device(dev); /* and put reference from 'bus_find_device()' */ @@ 
-1310,6 +1291,12 @@ static inline int coresight_search_device_idx(struct coresight_dev_list *dict, return -ENOENT; } +bool coresight_loses_context_with_cpu(struct device *dev) +{ + return fwnode_property_present(dev_fwnode(dev), + "arm,coresight-loses-context-with-cpu"); +} + /* * coresight_alloc_device_name - Get an index for a given device in the * device index list specific to a driver. An index is allocated for a |
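
Editor's note: the sketch below is an illustrative, standalone model (plain userspace C, not kernel code) of the sizing logic the patch adds to tmc_update_etr_buffer(): the copy size is clamped to the space left in the perf ring buffer, re-aligned with the memory-width mask described in the tmc_get_memwidth_mask() comment, and the start offset is advanced and wrapped inside the circular ETR buffer. The structure name, mask macros and sample values are invented for the demonstration and are not the kernel's symbols.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the masks returned by the real helper. */
#define WIDTH_32_128_MASK 0xfffffff0u /* 32/64/128-bit memory: 16-byte frames */
#define WIDTH_256_MASK    0xffffffe0u /* 256-bit memory: 32-byte alignment */

struct fake_etr_buf {
	uint64_t size;   /* total size of the circular trace buffer */
	uint64_t offset; /* where the oldest valid data starts */
	uint64_t len;    /* amount of valid trace data */
};

/*
 * Clamp the amount of trace to what fits in the perf ring buffer,
 * keep the size aligned to the trace-memory width, then move the
 * start offset forward (and wrap it) so only the newest data is kept.
 */
static uint64_t clamp_and_align(const struct fake_etr_buf *buf,
				uint64_t handle_size, uint32_t mask,
				uint64_t *offset)
{
	uint64_t size = buf->len;

	*offset = buf->offset;
	if (size > handle_size) {
		size = handle_size & mask;        /* drop the unaligned tail */
		*offset = buf->offset + buf->len - size;
		if (*offset >= buf->size)         /* wrap in the circular buffer */
			*offset -= buf->size;
	}
	return size;
}

int main(void)
{
	struct fake_etr_buf buf = { .size = 0x100000, .offset = 0xff000, .len = 0x80000 };
	uint64_t offset;
	uint64_t size = clamp_and_align(&buf, 0x10007, WIDTH_32_128_MASK, &offset);

	/* Expect a 16-byte aligned size (0x10000) and a wrapped offset (0x6f000). */
	printf("size=0x%llx offset=0x%llx\n",
	       (unsigned long long)size, (unsigned long long)offset);
	return 0;
}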
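
A second minimal sketch, again standalone and with invented names, models why the patch moves the wrap check to the top of the copy loop in tmc_etr_sync_perf_buffer(): the source offset handed in may already sit past the end of the circular buffer, so it must be wrapped before the first read. The real driver additionally splits copies on destination page boundaries and reads through tmc_etr_buf_get_data(); none of that is shown here.

#include <stdio.h>
#include <string.h>

/*
 * Copy 'to_copy' bytes starting at 'src_offset' in a circular source
 * buffer into a linear destination, wrapping the source offset before
 * each read, mirroring the ordering used by the patched sync routine.
 */
static void sync_circular(const char *src, size_t src_size,
			  size_t src_offset, size_t to_copy, char *dst)
{
	while (to_copy > 0) {
		size_t bytes;

		if (src_offset >= src_size)     /* wrap first, then read */
			src_offset -= src_size;
		bytes = src_size - src_offset;  /* contiguous run available */
		if (bytes > to_copy)
			bytes = to_copy;
		memcpy(dst, src + src_offset, bytes);
		dst += bytes;
		src_offset += bytes;
		to_copy -= bytes;
	}
}

int main(void)
{
	char src[8] = { 'E', 'F', 'G', 'H', 'A', 'B', 'C', 'D' };
	char dst[9] = { 0 };

	/* Oldest data starts at offset 4 and wraps: expect "ABCDEFGH". */
	sync_circular(src, sizeof(src), 4, 8, dst);
	printf("%s\n", dst);
	return 0;
}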