Diffstat (limited to 'drivers/hwtracing')
-rw-r--r--  drivers/hwtracing/coresight/Kconfig                  |   24
-rw-r--r--  drivers/hwtracing/coresight/Makefile                 |    1
-rw-r--r--  drivers/hwtracing/coresight/coresight-core.c         |   31
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c     |  123
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c   |  162
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c  |   19
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h        |   83
-rw-r--r--  drivers/hwtracing/coresight/coresight-platform.c     |    6
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h         |    3
-rw-r--r--  drivers/hwtracing/coresight/coresight-trbe.c         | 1157
-rw-r--r--  drivers/hwtracing/coresight/coresight-trbe.h         |  152
-rw-r--r--  drivers/hwtracing/intel_th/core.c                    |    2
-rw-r--r--  drivers/hwtracing/intel_th/gth.c                     |    4
-rw-r--r--  drivers/hwtracing/intel_th/intel_th.h                |    8
-rw-r--r--  drivers/hwtracing/intel_th/msu.c                     |    2
-rw-r--r--  drivers/hwtracing/intel_th/pci.c                     |   12
-rw-r--r--  drivers/hwtracing/intel_th/pti.c                     |    4
-rw-r--r--  drivers/hwtracing/stm/p_sys-t.c                      |    6
-rw-r--r--  drivers/hwtracing/stm/policy.c                       |    5
19 files changed, 1727 insertions(+), 77 deletions(-)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7b44ba22cbe1..84530fd80998 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -97,15 +97,15 @@ config CORESIGHT_SOURCE_ETM3X
 	  module will be called coresight-etm3x.
 
 config CORESIGHT_SOURCE_ETM4X
-	tristate "CoreSight Embedded Trace Macrocell 4.x driver"
+	tristate "CoreSight ETMv4.x / ETE driver"
 	depends on ARM64
 	select CORESIGHT_LINKS_AND_SINKS
 	select PID_IN_CONTEXTIDR
 	help
-	  This driver provides support for the ETM4.x tracer module, tracing the
-	  instructions that a processor is executing. This is primarily useful
-	  for instruction level tracing. Depending on the implemented version
-	  data tracing may also be available.
+	  This driver provides support for the CoreSight Embedded Trace Macrocell
+	  version 4.x and the Embedded Trace Extensions (ETE). Both are CPU tracer
+	  modules, tracing the instructions that a processor is executing. This is
+	  primarily useful for instruction level tracing.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called coresight-etm4x.
@@ -173,4 +173,18 @@ config CORESIGHT_CTI_INTEGRATION_REGS
 	  CTI trigger connections between this and other devices.These registers
 	  are not used in normal operation and can leave devices in an
 	  inconsistent state.
+
+config CORESIGHT_TRBE
+	tristate "Trace Buffer Extension (TRBE) driver"
+	depends on ARM64 && CORESIGHT_SOURCE_ETM4X
+	help
+	  This driver provides support for percpu Trace Buffer Extension (TRBE).
+	  TRBE always needs to be used along with its corresponding percpu ETE
+	  component. ETE generates trace data which is then captured with TRBE.
+	  Unlike traditional sink devices, TRBE is a CPU feature accessible via
+	  system registers. But its explicit dependency on the trace unit (ETE)
+	  requires it to be plugged in as a coresight sink device.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called coresight-trbe.
endif diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index f20e357758d1..d60816509755 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile @@ -21,5 +21,6 @@ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o +obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \ coresight-cti-sysfs.o diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 0062c8935653..6c68d34d956e 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -23,6 +23,7 @@ #include "coresight-priv.h" static DEFINE_MUTEX(coresight_mutex); +static DEFINE_PER_CPU(struct coresight_device *, csdev_sink); /** * struct coresight_node - elements of a path, from source to sink @@ -70,6 +71,18 @@ void coresight_remove_cti_ops(void) } EXPORT_SYMBOL_GPL(coresight_remove_cti_ops); +void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev) +{ + per_cpu(csdev_sink, cpu) = csdev; +} +EXPORT_SYMBOL_GPL(coresight_set_percpu_sink); + +struct coresight_device *coresight_get_percpu_sink(int cpu) +{ + return per_cpu(csdev_sink, cpu); +} +EXPORT_SYMBOL_GPL(coresight_get_percpu_sink); + static int coresight_id_match(struct device *dev, void *data) { int trace_id, i_trace_id; @@ -86,7 +99,7 @@ static int coresight_id_match(struct device *dev, void *data) i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE) return 0; - /* Get the source ID for both compoment */ + /* Get the source ID for both components */ trace_id = source_ops(csdev)->trace_id(csdev); i_trace_id = source_ops(i_csdev)->trace_id(i_csdev); @@ -784,6 +797,14 @@ static int _coresight_build_path(struct coresight_device *csdev, if (csdev == sink) goto out; + if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) && + sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) { + if (_coresight_build_path(sink, sink, path) == 0) { + found = true; + goto out; + } + } + /* Not a sink - recursively explore each port found on this element */ for (i = 0; i < csdev->pdata->nr_outport; i++) { struct coresight_device *child_dev; @@ -999,8 +1020,12 @@ coresight_find_default_sink(struct coresight_device *csdev) int depth = 0; /* look for a default sink if we have not found for this device */ - if (!csdev->def_sink) - csdev->def_sink = coresight_find_sink(csdev, &depth); + if (!csdev->def_sink) { + if (coresight_is_percpu_source(csdev)) + csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev)); + if (!csdev->def_sink) + csdev->def_sink = coresight_find_sink(csdev, &depth); + } return csdev->def_sink; } diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 0f603b4094f2..6f398377fec9 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -24,7 +24,26 @@ static struct pmu etm_pmu; static bool etm_perf_up; -static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); +/* + * An ETM context for a running event includes the perf aux handle + * and aux_data. For ETM, the aux_data (etm_event_data), consists of + * the trace path and the sink configuration. The event data is accessible + * via perf_get_aux(handle). 
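A minimal userspace sketch of the lifetime problem described above (all structs and the perf_get_aux() stand-in are mocks for illustration, not the kernel API): once the IRQ side closes the handle, only the copy cached in the per-CPU context still reaches the event data.

#include <stdio.h>
#include <stddef.h>

struct etm_event_data { int id; };

struct perf_output_handle {
	struct etm_event_data *aux;	/* what perf_get_aux() would return */
	void *event;			/* NULL once the handle is closed   */
};

/* mock: mirrors perf_get_aux(), which only works on a live handle */
static struct etm_event_data *mock_perf_get_aux(struct perf_output_handle *h)
{
	return h->event ? h->aux : NULL;
}

/* per-CPU context as described above: the handle plus a cached event_data */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

int main(void)
{
	struct etm_event_data data = { .id = 1 };
	struct etm_ctxt ctxt = {
		.handle = { .aux = &data, .event = &data },
		.event_data = &data,	/* cached at etm_event_start() time */
	};

	/* the "IRQ handler" ends the handle and fails to restart it */
	ctxt.handle.event = NULL;

	/* stop path: the handle no longer yields the aux data ... */
	printf("via handle: %p\n", (void *)mock_perf_get_aux(&ctxt.handle));
	/* ... but the cached copy still lets us tear down the trace path */
	printf("via ctxt:   %p\n", (void *)ctxt.event_data);
	return 0;
}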
However, a sink could "end" a perf output + * handle via the IRQ handler. And if the "sink" encounters a failure + * to "begin" another session (e.g due to lack of space in the buffer), + * the handle will be cleared. Thus, the event_data may not be accessible + * from the handle when we get to the etm_event_stop(), which is required + * for stopping the trace path. The event_data is guaranteed to stay alive + * until "free_aux()", which cannot happen as long as the event is active on + * the ETM. Thus the event_data for the session must be part of the ETM context + * to make sure we can disable the trace path. + */ +struct etm_ctxt { + struct perf_output_handle handle; + struct etm_event_data *event_data; +}; + +static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt); static DEFINE_PER_CPU(struct coresight_device *, csdev_src); /* @@ -52,13 +71,13 @@ static ssize_t format_attr_contextid_show(struct device *dev, { int pid_fmt = ETM_OPT_CTXTID; -#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X) +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID; #endif return sprintf(page, "config:%d\n", pid_fmt); } -struct device_attribute format_attr_contextid = +static struct device_attribute format_attr_contextid = __ATTR(contextid, 0444, format_attr_contextid_show, NULL); static struct attribute *etm_config_formats_attr[] = { @@ -232,6 +251,25 @@ static void etm_free_aux(void *data) schedule_work(&event_data->work); } +/* + * Check if two given sinks are compatible with each other, + * so that they can use the same sink buffers, when an event + * moves around. + */ +static bool sinks_compatible(struct coresight_device *a, + struct coresight_device *b) +{ + if (!a || !b) + return false; + /* + * If the sinks are of the same subtype and driven + * by the same driver, we can use the same buffer + * on these sinks. + */ + return (a->subtype.sink_subtype == b->subtype.sink_subtype) && + (sink_ops(a) == sink_ops(b)); +} + static void *etm_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) { @@ -239,6 +277,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, int cpu = event->cpu; cpumask_t *mask; struct coresight_device *sink = NULL; + struct coresight_device *user_sink = NULL, *last_sink = NULL; struct etm_event_data *event_data = NULL; event_data = alloc_event_data(cpu); @@ -249,7 +288,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, /* First get the selected sink from user space. */ if (event->attr.config2) { id = (u32)event->attr.config2; - sink = coresight_get_sink_by_id(id); + sink = user_sink = coresight_get_sink_by_id(id); } mask = &event_data->mask; @@ -277,14 +316,33 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, } /* - * No sink provided - look for a default sink for one of the - * devices. At present we only support topology where all CPUs - * use the same sink [N:1], so only need to find one sink. The - * coresight_build_path later will remove any CPU that does not - * attach to the sink, or if we have not found a sink. + * No sink provided - look for a default sink for all the ETMs, + * where this event can be scheduled. + * We allocate the sink specific buffers only once for this + * event. If the ETMs have different default sink devices, we + * can only use a single "type" of sink as the event can carry + * only one sink specific buffer. 
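A small standalone sketch of the compatibility rule just described (the sink subtypes and ops tables are invented placeholders): the first default sink found fixes the "type" of buffer the event carries, and any CPU whose default sink differs is dropped from the event's CPU mask.

#include <stdio.h>
#include <stdbool.h>

struct sink_ops { int dummy; };
struct sink { int subtype; const struct sink_ops *ops; };

/* same comparison the driver's sinks_compatible() performs */
static bool sinks_compatible(const struct sink *a, const struct sink *b)
{
	if (!a || !b)
		return false;
	return a->subtype == b->subtype && a->ops == b->ops;
}

int main(void)
{
	const struct sink_ops trbe_ops = {0}, etr_ops = {0};
	struct sink trbe0 = { 1, &trbe_ops }, trbe1 = { 1, &trbe_ops };
	struct sink etr   = { 2, &etr_ops };
	/* hypothetical per-CPU default sinks: two TRBEs, one ETR */
	struct sink *def[3] = { &trbe0, &trbe1, &etr };
	struct sink *last = NULL;

	for (int cpu = 0; cpu < 3; cpu++) {
		if (last && !sinks_compatible(last, def[cpu])) {
			printf("cpu%d: dropped (incompatible sink)\n", cpu);
			continue;
		}
		last = def[cpu];
		printf("cpu%d: traced\n", cpu);
	}
	return 0;
}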
Thus we have to make sure + * that the sinks are of the same type and driven by the same + * driver, as the one we allocate the buffer for. As such + * we choose the first sink and check if the remaining ETMs + * have a compatible default sink. We don't trace on a CPU + * if the sink is not compatible. */ - if (!sink) + if (!user_sink) { + /* Find the default sink for this ETM */ sink = coresight_find_default_sink(csdev); + if (!sink) { + cpumask_clear_cpu(cpu, mask); + continue; + } + + /* Check if this sink compatible with the last sink */ + if (last_sink && !sinks_compatible(last_sink, sink)) { + cpumask_clear_cpu(cpu, mask); + continue; + } + last_sink = sink; + } /* * Building a path doesn't enable it, it simply builds a @@ -312,7 +370,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer) goto err; - /* Allocate the sink buffer for this session */ + /* + * Allocate the sink buffer for this session. All the sinks + * where this event can be scheduled are ensured to be of the + * same type. Thus the same sink configuration is used by the + * sinks. + */ event_data->snk_config = sink_ops(sink)->alloc_buffer(sink, event, pages, nr_pages, overwrite); @@ -332,13 +395,18 @@ static void etm_event_start(struct perf_event *event, int flags) { int cpu = smp_processor_id(); struct etm_event_data *event_data; - struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); + struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); + struct perf_output_handle *handle = &ctxt->handle; struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); struct list_head *path; if (!csdev) goto fail; + /* Have we messed up our tracking ? */ + if (WARN_ON(ctxt->event_data)) + goto fail; + /* * Deal with the ring buffer API and get a handle on the * session's information. @@ -374,6 +442,8 @@ static void etm_event_start(struct perf_event *event, int flags) if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) goto fail_disable_path; + /* Save the event_data for this ETM */ + ctxt->event_data = event_data; out: return; @@ -392,13 +462,30 @@ static void etm_event_stop(struct perf_event *event, int mode) int cpu = smp_processor_id(); unsigned long size; struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); - struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); - struct etm_event_data *event_data = perf_get_aux(handle); + struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); + struct perf_output_handle *handle = &ctxt->handle; + struct etm_event_data *event_data; struct list_head *path; + /* + * If we still have access to the event_data via handle, + * confirm that we haven't messed up the tracking. + */ + if (handle->event && + WARN_ON(perf_get_aux(handle) != ctxt->event_data)) + return; + + event_data = ctxt->event_data; + /* Clear the event_data as this ETM is stopping the trace. */ + ctxt->event_data = NULL; + if (event->hw.state == PERF_HES_STOPPED) return; + /* We must have a valid event_data for a running event */ + if (WARN_ON(!event_data)) + return; + if (!csdev) return; @@ -416,7 +503,13 @@ static void etm_event_stop(struct perf_event *event, int mode) /* tell the core */ event->hw.state = PERF_HES_STOPPED; - if (mode & PERF_EF_UPDATE) { + /* + * If the handle is not bound to an event anymore + * (e.g, the sink driver was unable to restart the + * handle due to lack of buffer space), we don't + * have to do anything here. 
+ */ + if (handle->event && (mode & PERF_EF_UPDATE)) { if (WARN_ON_ONCE(handle->event != event)) return; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 15016f757828..db881993c211 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <linux/property.h> +#include <asm/barrier.h> #include <asm/sections.h> #include <asm/sysreg.h> #include <asm/local.h> @@ -114,30 +115,91 @@ void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit) } } -static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, struct csdev_access *csa) +static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit) { - /* Writing 0 to TRCOSLAR unlocks the trace registers */ - etm4x_relaxed_write32(csa, 0x0, TRCOSLAR); - drvdata->os_unlock = true; + u64 res = 0; + + switch (offset) { + ETE_READ_CASES(res) + default : + pr_warn_ratelimited("ete: trying to read unsupported register @%x\n", + offset); + } + + if (!_relaxed) + __iormb(res); /* Imitate the !relaxed I/O helpers */ + + return res; +} + +static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit) +{ + if (!_relaxed) + __iowmb(); /* Imitate the !relaxed I/O helpers */ + if (!_64bit) + val &= GENMASK(31, 0); + + switch (offset) { + ETE_WRITE_CASES(val) + default : + pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n", + offset); + } +} + +static void etm_detect_os_lock(struct etmv4_drvdata *drvdata, + struct csdev_access *csa) +{ + u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR); + + drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr); +} + +static void etm_write_os_lock(struct etmv4_drvdata *drvdata, + struct csdev_access *csa, u32 val) +{ + val = !!val; + + switch (drvdata->os_lock_model) { + case ETM_OSLOCK_PRESENT: + etm4x_relaxed_write32(csa, val, TRCOSLAR); + break; + case ETM_OSLOCK_PE: + write_sysreg_s(val, SYS_OSLAR_EL1); + break; + default: + pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n", + smp_processor_id(), drvdata->os_lock_model); + fallthrough; + case ETM_OSLOCK_NI: + return; + } isb(); } +static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, + struct csdev_access *csa) +{ + WARN_ON(drvdata->cpu != smp_processor_id()); + + /* Writing 0 to OS Lock unlocks the trace unit registers */ + etm_write_os_lock(drvdata, csa, 0x0); + drvdata->os_unlock = true; +} + static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { if (!WARN_ON(!drvdata->csdev)) etm4_os_unlock_csa(drvdata, &drvdata->csdev->access); - } static void etm4_os_lock(struct etmv4_drvdata *drvdata) { if (WARN_ON(!drvdata->csdev)) return; - - /* Writing 0x1 to TRCOSLAR locks the trace registers */ - etm4x_relaxed_write32(&drvdata->csdev->access, 0x1, TRCOSLAR); + /* Writing 0x1 to OS Lock locks the trace registers */ + etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1); drvdata->os_unlock = false; - isb(); } static void etm4_cs_lock(struct etmv4_drvdata *drvdata, @@ -371,6 +433,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR); } + /* + * ETE mandates that the TRCRSR is written to before + * enabling it. 
+ */ + if (etm4x_is_ete(drvdata)) + etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR); + /* Enable the trace unit */ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR); @@ -654,6 +723,7 @@ static int etm4_enable(struct coresight_device *csdev, static void etm4_disable_hw(void *info) { u32 control; + u64 trfcr; struct etmv4_drvdata *drvdata = info; struct etmv4_config *config = &drvdata->config; struct coresight_device *csdev = drvdata->csdev; @@ -677,18 +747,32 @@ static void etm4_disable_hw(void *info) control &= ~0x1; /* + * If the CPU supports v8.4 Trace filter Control, + * set the ETM to trace prohibited region. + */ + if (drvdata->trfc) { + trfcr = read_sysreg_s(SYS_TRFCR_EL1); + write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE), + SYS_TRFCR_EL1); + isb(); + } + /* * Make sure everything completes before disabling, as recommended * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, * SSTATUS") of ARM IHI 0064D */ dsb(sy); isb(); + /* Trace synchronization barrier, is a nop if not supported */ + tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR); /* wait for TRCSTATR.PMSTABLE to go to '1' */ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + if (drvdata->trfc) + write_sysreg_s(trfcr, SYS_TRFCR_EL1); /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { @@ -817,13 +901,24 @@ static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata, * ETMs implementing sysreg access must implement TRCDEVARCH. */ devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH); - if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) + switch (devarch & ETM_DEVARCH_ID_MASK) { + case ETM_DEVARCH_ETMv4x_ARCH: + *csa = (struct csdev_access) { + .io_mem = false, + .read = etm4x_sysreg_read, + .write = etm4x_sysreg_write, + }; + break; + case ETM_DEVARCH_ETE_ARCH: + *csa = (struct csdev_access) { + .io_mem = false, + .read = ete_sysreg_read, + .write = ete_sysreg_write, + }; + break; + default: return false; - *csa = (struct csdev_access) { - .io_mem = false, - .read = etm4x_sysreg_read, - .write = etm4x_sysreg_write, - }; + } drvdata->arch = etm_devarch_to_arch(devarch); return true; @@ -873,7 +968,7 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata, return false; } -static void cpu_enable_tracing(void) +static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) { u64 dfr0 = read_sysreg(id_aa64dfr0_el1); u64 trfcr; @@ -881,6 +976,7 @@ static void cpu_enable_tracing(void) if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT)) return; + drvdata->trfc = true; /* * If the CPU supports v8.4 SelfHosted Tracing, enable * tracing at the kernel EL and EL0, forcing to use the @@ -920,6 +1016,9 @@ static void etm4_init_arch_data(void *info) if (!etm4_init_csdev_access(drvdata, csa)) return; + /* Detect the support for OS Lock before we actually use it */ + etm_detect_os_lock(drvdata, csa); + /* Make sure all registers are accessible */ etm4_os_unlock_csa(drvdata, csa); etm4_cs_unlock(drvdata, csa); @@ -1082,7 +1181,7 @@ static void etm4_init_arch_data(void *info) /* NUMCNTR, bits[30:28] number of counters available for tracing */ drvdata->nr_cntr = BMVAL(etmidr5, 28, 30); etm4_cs_lock(drvdata, csa); - cpu_enable_tracing(); + cpu_enable_tracing(drvdata); } static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config) @@ -1760,6 +1859,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) 
struct etmv4_drvdata *drvdata; struct coresight_desc desc = { 0 }; struct etm4_init_arg init_arg = { 0 }; + u8 major, minor; + char *type_name; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) @@ -1786,10 +1887,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) if (drvdata->cpu < 0) return drvdata->cpu; - desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu); - if (!desc.name) - return -ENOMEM; - init_arg.drvdata = drvdata; init_arg.csa = &desc.access; init_arg.pid = etm_pid; @@ -1806,6 +1903,22 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up")) drvdata->skip_power_up = true; + major = ETM_ARCH_MAJOR_VERSION(drvdata->arch); + minor = ETM_ARCH_MINOR_VERSION(drvdata->arch); + + if (etm4x_is_ete(drvdata)) { + type_name = "ete"; + /* ETE v1 has major version == 0b101. Adjust this for logging.*/ + major -= 4; + } else { + type_name = "etm"; + } + + desc.name = devm_kasprintf(dev, GFP_KERNEL, + "%s%d", type_name, drvdata->cpu); + if (!desc.name) + return -ENOMEM; + etm4_init_trace_id(drvdata); etm4_set_default(&drvdata->config); @@ -1833,9 +1946,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) etmdrvdata[drvdata->cpu] = drvdata; - dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n", - drvdata->cpu, ETM_ARCH_MAJOR_VERSION(drvdata->arch), - ETM_ARCH_MINOR_VERSION(drvdata->arch)); + dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n", + drvdata->cpu, type_name, major, minor); if (boot_enable) { coresight_enable(drvdata->csdev); @@ -1951,6 +2063,7 @@ static const struct amba_id etm4_ids[] = { CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */ CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */ CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */ + CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */ CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */ CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */ CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */ @@ -1978,6 +2091,7 @@ static struct amba_driver etm4x_amba_driver = { static const struct of_device_id etm4_sysreg_match[] = { { .compatible = "arm,coresight-etm4x-sysreg" }, + { .compatible = "arm,embedded-trace-extension" }, {} }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 0995a10790f4..007bad9e7ad8 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -2374,12 +2374,20 @@ static inline bool etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset) { switch (offset) { - ETM4x_SYSREG_LIST_CASES + ETM_COMMON_SYSREG_LIST_CASES /* - * Registers accessible via system instructions are always - * implemented. + * Common registers to ETE & ETM4x accessible via system + * instructions are always implemented. */ return true; + + ETM4x_ONLY_SYSREG_LIST_CASES + /* + * We only support etm4x and ete. So if the device is not + * ETE, it must be ETMv4x. + */ + return !etm4x_is_ete(drvdata); + ETM4x_MMAP_LIST_CASES /* * Registers accessible only via memory-mapped registers @@ -2389,8 +2397,13 @@ etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset) * coresight_register() and the csdev is not initialized * until that is done. So rely on the drvdata->base to * detect if we have a memory mapped access. 
+ * Also ETE doesn't implement memory mapped access, thus + * it is sufficient to check that we are using mmio. */ return !!drvdata->base; + + ETE_ONLY_SYSREG_LIST_CASES + return etm4x_is_ete(drvdata); } return false; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 0af60571aa23..e5b79bdb9851 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -29,6 +29,7 @@ #define TRCAUXCTLR 0x018 #define TRCEVENTCTL0R 0x020 #define TRCEVENTCTL1R 0x024 +#define TRCRSR 0x028 #define TRCSTALLCTLR 0x02C #define TRCTSCTLR 0x030 #define TRCSYNCPR 0x034 @@ -49,6 +50,7 @@ #define TRCSEQRSTEVR 0x118 #define TRCSEQSTR 0x11C #define TRCEXTINSELR 0x120 +#define TRCEXTINSELRn(n) (0x120 + (n * 4)) /* n = 0-3 */ #define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */ #define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */ #define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */ @@ -126,6 +128,8 @@ #define TRCCIDR2 0xFF8 #define TRCCIDR3 0xFFC +#define TRCRSR_TA BIT(12) + /* * System instructions to access ETM registers. * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions @@ -160,10 +164,22 @@ #define CASE_NOP(__unused, x) \ case (x): /* fall through */ +#define ETE_ONLY_SYSREG_LIST(op, val) \ + CASE_##op((val), TRCRSR) \ + CASE_##op((val), TRCEXTINSELRn(1)) \ + CASE_##op((val), TRCEXTINSELRn(2)) \ + CASE_##op((val), TRCEXTINSELRn(3)) + /* List of registers accessible via System instructions */ -#define ETM_SYSREG_LIST(op, val) \ - CASE_##op((val), TRCPRGCTLR) \ +#define ETM4x_ONLY_SYSREG_LIST(op, val) \ CASE_##op((val), TRCPROCSELR) \ + CASE_##op((val), TRCVDCTLR) \ + CASE_##op((val), TRCVDSACCTLR) \ + CASE_##op((val), TRCVDARCCTLR) \ + CASE_##op((val), TRCOSLAR) + +#define ETM_COMMON_SYSREG_LIST(op, val) \ + CASE_##op((val), TRCPRGCTLR) \ CASE_##op((val), TRCSTATR) \ CASE_##op((val), TRCCONFIGR) \ CASE_##op((val), TRCAUXCTLR) \ @@ -180,9 +196,6 @@ CASE_##op((val), TRCVIIECTLR) \ CASE_##op((val), TRCVISSCTLR) \ CASE_##op((val), TRCVIPCSSCTLR) \ - CASE_##op((val), TRCVDCTLR) \ - CASE_##op((val), TRCVDSACCTLR) \ - CASE_##op((val), TRCVDARCCTLR) \ CASE_##op((val), TRCSEQEVRn(0)) \ CASE_##op((val), TRCSEQEVRn(1)) \ CASE_##op((val), TRCSEQEVRn(2)) \ @@ -277,7 +290,6 @@ CASE_##op((val), TRCSSPCICRn(5)) \ CASE_##op((val), TRCSSPCICRn(6)) \ CASE_##op((val), TRCSSPCICRn(7)) \ - CASE_##op((val), TRCOSLAR) \ CASE_##op((val), TRCOSLSR) \ CASE_##op((val), TRCACVRn(0)) \ CASE_##op((val), TRCACVRn(1)) \ @@ -369,12 +381,38 @@ CASE_##op((val), TRCPIDR2) \ CASE_##op((val), TRCPIDR3) -#define ETM4x_READ_SYSREG_CASES(res) ETM_SYSREG_LIST(READ, (res)) -#define ETM4x_WRITE_SYSREG_CASES(val) ETM_SYSREG_LIST(WRITE, (val)) +#define ETM4x_READ_SYSREG_CASES(res) \ + ETM_COMMON_SYSREG_LIST(READ, (res)) \ + ETM4x_ONLY_SYSREG_LIST(READ, (res)) + +#define ETM4x_WRITE_SYSREG_CASES(val) \ + ETM_COMMON_SYSREG_LIST(WRITE, (val)) \ + ETM4x_ONLY_SYSREG_LIST(WRITE, (val)) + +#define ETM_COMMON_SYSREG_LIST_CASES \ + ETM_COMMON_SYSREG_LIST(NOP, __unused) + +#define ETM4x_ONLY_SYSREG_LIST_CASES \ + ETM4x_ONLY_SYSREG_LIST(NOP, __unused) + +#define ETM4x_SYSREG_LIST_CASES \ + ETM_COMMON_SYSREG_LIST_CASES \ + ETM4x_ONLY_SYSREG_LIST(NOP, __unused) -#define ETM4x_SYSREG_LIST_CASES ETM_SYSREG_LIST(NOP, __unused) #define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused) +/* ETE only supports system register access */ +#define ETE_READ_CASES(res) \ + ETM_COMMON_SYSREG_LIST(READ, (res)) \ + ETE_ONLY_SYSREG_LIST(READ, (res)) + 
+#define ETE_WRITE_CASES(val) \ + ETM_COMMON_SYSREG_LIST(WRITE, (val)) \ + ETE_ONLY_SYSREG_LIST(WRITE, (val)) + +#define ETE_ONLY_SYSREG_LIST_CASES \ + ETE_ONLY_SYSREG_LIST(NOP, __unused) + #define read_etm4x_sysreg_offset(offset, _64bit) \ ({ \ u64 __val; \ @@ -506,6 +544,20 @@ ETM_MODE_EXCL_USER) /* + * TRCOSLSR.OSLM advertises the OS Lock model. + * OSLM[2:0] = TRCOSLSR[4:3,0] + * + * 0b000 - Trace OS Lock is not implemented. + * 0b010 - Trace OS Lock is implemented. + * 0b100 - Trace OS Lock is not implemented, unit is controlled by PE OS Lock. + */ +#define ETM_OSLOCK_NI 0b000 +#define ETM_OSLOCK_PRESENT 0b010 +#define ETM_OSLOCK_PE 0b100 + +#define ETM_OSLSR_OSLM(oslsr) ((((oslsr) & GENMASK(4, 3)) >> 2) | (oslsr & 0x1)) + +/* * TRCDEVARCH Bit field definitions * Bits[31:21] - ARCHITECT = Always Arm Ltd. * * Bits[31:28] = 0x4 @@ -541,11 +593,14 @@ ((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13)) #define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4) +#define ETM_DEVARCH_ARCHID_ETE ETM_DEVARCH_MAKE_ARCHID(0x5) #define ETM_DEVARCH_ID_MASK \ (ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT) #define ETM_DEVARCH_ETMv4x_ARCH \ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT) +#define ETM_DEVARCH_ETE_ARCH \ + (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT) #define TRCSTATR_IDLE_BIT 0 #define TRCSTATR_PMSTABLE_BIT 1 @@ -635,6 +690,8 @@ #define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU) #define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0) +#define ETM_ARCH_ETE ETM_ARCH_VERSION(5, 0) + /* Interpretation of resource numbers change at ETM v4.3 architecture */ #define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3) @@ -862,6 +919,7 @@ struct etmv4_save_state { * @nooverflow: Indicate if overflow prevention is supported. * @atbtrig: If the implementation can support ATB triggers * @lpoverride: If the implementation can support low-power state over. + * @trfc: If the implementation supports Arm v8.4 trace filter controls. * @config: structure holding configuration parameters. 
* @save_state: State to be preserved across power loss * @state_needs_restore: True when there is context to restore after PM exit @@ -897,6 +955,7 @@ struct etmv4_drvdata { u8 s_ex_level; u8 ns_ex_level; u8 q_support; + u8 os_lock_model; bool sticky_enable; bool boot_enable; bool os_unlock; @@ -912,6 +971,7 @@ struct etmv4_drvdata { bool nooverflow; bool atbtrig; bool lpoverride; + bool trfc; struct etmv4_config config; struct etmv4_save_state *save_state; bool state_needs_restore; @@ -940,4 +1000,9 @@ void etm4_config_trace_mode(struct etmv4_config *config); u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit); void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit); + +static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata) +{ + return drvdata->arch >= ETM_ARCH_ETE; +} #endif diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c index 3629b7885aca..c594f45319fc 100644 --- a/drivers/hwtracing/coresight/coresight-platform.c +++ b/drivers/hwtracing/coresight/coresight-platform.c @@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node, struct of_endpoint endpoint; int in = 0, out = 0; + /* + * Avoid warnings in of_graph_get_next_endpoint() + * if the device doesn't have any graph connections + */ + if (!of_graph_is_present(node)) + return; do { ep = of_graph_get_next_endpoint(node, ep); if (!ep) diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index f5f654ea2994..ff1dd2092ac5 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -232,4 +232,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode); void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev, struct coresight_device *ect_csdev); +void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev); +struct coresight_device *coresight_get_percpu_sink(int cpu); + #endif diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c new file mode 100644 index 000000000000..176868496879 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This driver enables Trace Buffer Extension (TRBE) as a per-cpu coresight + * sink device could then pair with an appropriate per-cpu coresight source + * device (ETE) thus generating required trace data. Trace can be enabled + * via the perf framework. + * + * The AUX buffer handling is inspired from Arm SPE PMU driver. + * + * Copyright (C) 2020 ARM Ltd. + * + * Author: Anshuman Khandual <anshuman.khandual@arm.com> + */ +#define DRVNAME "arm_trbe" + +#define pr_fmt(fmt) DRVNAME ": " fmt + +#include <asm/barrier.h> +#include "coresight-trbe.h" + +#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) + +/* + * A padding packet that will help the user space tools + * in skipping relevant sections in the captured trace + * data which could not be decoded. TRBE doesn't support + * formatting the trace data, unlike the legacy CoreSight + * sinks and thus we use ETE trace packets to pad the + * sections of the buffer. + */ +#define ETE_IGNORE_PACKET 0x70 + +/* + * Minimum amount of meaningful trace will contain: + * A-Sync, Trace Info, Trace On, Address, Atom. + * This is about 44bytes of ETE trace. 
To be on + * the safer side, we assume 64bytes is the minimum + * space required for a meaningful session, before + * we hit a "WRAP" event. + */ +#define TRBE_TRACE_MIN_BUF_SIZE 64 + +enum trbe_fault_action { + TRBE_FAULT_ACT_WRAP, + TRBE_FAULT_ACT_SPURIOUS, + TRBE_FAULT_ACT_FATAL, +}; + +struct trbe_buf { + /* + * Even though trbe_base represents vmap() + * mapped allocated buffer's start address, + * it's being as unsigned long for various + * arithmetic and comparision operations & + * also to be consistent with trbe_write & + * trbe_limit sibling pointers. + */ + unsigned long trbe_base; + unsigned long trbe_limit; + unsigned long trbe_write; + int nr_pages; + void **pages; + bool snapshot; + struct trbe_cpudata *cpudata; +}; + +struct trbe_cpudata { + bool trbe_flag; + u64 trbe_align; + int cpu; + enum cs_mode mode; + struct trbe_buf *buf; + struct trbe_drvdata *drvdata; +}; + +struct trbe_drvdata { + struct trbe_cpudata __percpu *cpudata; + struct perf_output_handle * __percpu *handle; + struct hlist_node hotplug_node; + int irq; + cpumask_t supported_cpus; + enum cpuhp_state trbe_online; + struct platform_device *pdev; +}; + +static int trbe_alloc_node(struct perf_event *event) +{ + if (event->cpu == -1) + return NUMA_NO_NODE; + return cpu_to_node(event->cpu); +} + +static void trbe_drain_buffer(void) +{ + tsb_csync(); + dsb(nsh); +} + +static void trbe_drain_and_disable_local(void) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + + trbe_drain_buffer(); + + /* + * Disable the TRBE without clearing LIMITPTR which + * might be required for fetching the buffer limits. + */ + trblimitr &= ~TRBLIMITR_ENABLE; + write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + isb(); +} + +static void trbe_reset_local(void) +{ + trbe_drain_and_disable_local(); + write_sysreg_s(0, SYS_TRBLIMITR_EL1); + write_sysreg_s(0, SYS_TRBPTR_EL1); + write_sysreg_s(0, SYS_TRBBASER_EL1); + write_sysreg_s(0, SYS_TRBSR_EL1); +} + +static void trbe_stop_and_truncate_event(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + + /* + * We cannot proceed with the buffer collection and we + * do not have any data for the current session. The + * etm_perf driver expects to close out the aux_buffer + * at event_stop(). So disable the TRBE here and leave + * the update_buffer() to return a 0 size. + */ + trbe_drain_and_disable_local(); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL; +} + +/* + * TRBE Buffer Management + * + * The TRBE buffer spans from the base pointer till the limit pointer. When enabled, + * it starts writing trace data from the write pointer onward till the limit pointer. + * When the write pointer reaches the address just before the limit pointer, it gets + * wrapped around again to the base pointer. This is called a TRBE wrap event, which + * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver + * uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ + * handler updates the AUX buffer and re-enables the TRBE with updated WRITE and + * LIMIT pointers. + * + * Wrap around with an IRQ + * ------ < ------ < ------- < ----- < ----- + * | | + * ------ > ------ > ------- > ----- > ----- + * + * +---------------+-----------------------+ + * | | | + * +---------------+-----------------------+ + * Base Pointer Write Pointer Limit Pointer + * + * The base and limit pointers always needs to be PAGE_SIZE aligned. 
But the write + * pointer can be aligned to the implementation defined TRBE trace buffer alignment + * as captured in trbe_cpudata->trbe_align. + * + * + * head tail wakeup + * +---------------------------------------+----- ~ ~ ------ + * |$$$$$$$|################|$$$$$$$$$$$$$$| | + * +---------------------------------------+----- ~ ~ ------ + * Base Pointer Write Pointer Limit Pointer + * + * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing + * values which tracks all the driver writes and user reads from the perf auxiliary + * buffer. Generally [head..tail] is the area where the driver can write into unless + * the wakeup is behind the tail. Enabled TRBE buffer span needs to be adjusted and + * configured depending on the perf_output_handle indices, so that the driver does + * not override into areas in the perf auxiliary buffer which is being or yet to be + * consumed from the user space. The enabled TRBE buffer area is a moving subset of + * the allocated perf auxiliary buffer. + */ +static void trbe_pad_buf(struct perf_output_handle *handle, int len) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len); + if (!buf->snapshot) + perf_aux_output_skip(handle, len); +} + +static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + + /* + * The ETE trace has alignment synchronization packets allowing + * the decoder to reset in case of an overflow or corruption. + * So we can use the entire buffer for the snapshot mode. + */ + return buf->nr_pages * PAGE_SIZE; +} + +/* + * TRBE Limit Calculation + * + * The following markers are used to illustrate various TRBE buffer situations. + * + * $$$$ - Data area, unconsumed captured trace data, not to be overridden + * #### - Free area, enabled, trace will be written + * %%%% - Free area, disabled, trace will not be written + * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped + */ +static unsigned long __trbe_normal_offset(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + struct trbe_cpudata *cpudata = buf->cpudata; + const u64 bufsize = buf->nr_pages * PAGE_SIZE; + u64 limit = bufsize; + u64 head, tail, wakeup; + + head = PERF_IDX2OFF(handle->head, buf); + + /* + * head + * ------->| + * | + * head TRBE align tail + * +----|-------|---------------|-------+ + * |$$$$|=======|###############|$$$$$$$| + * +----|-------|---------------|-------+ + * trbe_base trbe_base + nr_pages + * + * Perf aux buffer output head position can be misaligned depending on + * various factors including user space reads. In case misaligned, head + * needs to be aligned before TRBE can be configured. Pad the alignment + * gap with ETE_IGNORE_PACKET bytes that will be ignored by user tools + * and skip this section thus advancing the head. + */ + if (!IS_ALIGNED(head, cpudata->trbe_align)) { + unsigned long delta = roundup(head, cpudata->trbe_align) - head; + + delta = min(delta, handle->size); + trbe_pad_buf(handle, delta); + head = PERF_IDX2OFF(handle->head, buf); + } + + /* + * head = tail (size = 0) + * +----|-------------------------------+ + * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ | + * +----|-------------------------------+ + * trbe_base trbe_base + nr_pages + * + * Perf aux buffer does not have any space for the driver to write into. 
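To make the index arithmetic concrete, here is a runnable userspace sketch (the page size, alignment and head values are assumed, not read from hardware) of how the monotonically increasing handle->head maps into the ring buffer and how large the ETE_IGNORE_PACKET padding gap becomes:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* simplified form of the driver's PERF_IDX2OFF(), taking nr_pages directly */
#define IDX2OFF(idx, nr_pages) ((idx) % ((unsigned long)(nr_pages) << PAGE_SHIFT))

/* round up to a power-of-two alignment, as roundup() would */
static unsigned long roundup_to(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	int nr_pages = 4;			/* 16KiB AUX buffer */
	unsigned long trbe_align = 64;		/* assumed TRBIDR alignment */
	unsigned long head_idx = 0x5123;	/* monotonically increasing head */
	unsigned long head = IDX2OFF(head_idx, nr_pages);
	unsigned long delta = roundup_to(head, trbe_align) - head;

	printf("head offset in ring: %#lx\n", head);	   /* 0x1123 */
	printf("padding to align:    %lu bytes\n", delta); /* 29 bytes of 0x70 */
	return 0;
}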
+ * Just communicate trace truncation event to the user space by marking + * it with PERF_AUX_FLAG_TRUNCATED. + */ + if (!handle->size) { + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + return 0; + } + + /* Compute the tail and wakeup indices now that we've aligned head */ + tail = PERF_IDX2OFF(handle->head + handle->size, buf); + wakeup = PERF_IDX2OFF(handle->wakeup, buf); + + /* + * Lets calculate the buffer area which TRBE could write into. There + * are three possible scenarios here. Limit needs to be aligned with + * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the + * unconsumed data. + * + * 1) head < tail + * + * head tail + * +----|-----------------------|-------+ + * |$$$$|#######################|$$$$$$$| + * +----|-----------------------|-------+ + * trbe_base limit trbe_base + nr_pages + * + * TRBE could write into [head..tail] area. Unless the tail is right at + * the end of the buffer, neither an wrap around nor an IRQ is expected + * while being enabled. + * + * 2) head == tail + * + * head = tail (size > 0) + * +----|-------------------------------+ + * |%%%%|###############################| + * +----|-------------------------------+ + * trbe_base limit = trbe_base + nr_pages + * + * TRBE should just write into [head..base + nr_pages] area even though + * the entire buffer is empty. Reason being, when the trace reaches the + * end of the buffer, it will just wrap around with an IRQ giving an + * opportunity to reconfigure the buffer. + * + * 3) tail < head + * + * tail head + * +----|-----------------------|-------+ + * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######| + * +----|-----------------------|-------+ + * trbe_base limit = trbe_base + nr_pages + * + * TRBE should just write into [head..base + nr_pages] area even though + * the [trbe_base..tail] is also empty. Reason being, when the trace + * reaches the end of the buffer, it will just wrap around with an IRQ + * giving an opportunity to reconfigure the buffer. + */ + if (head < tail) + limit = round_down(tail, PAGE_SIZE); + + /* + * Wakeup may be arbitrarily far into the future. If it's not in the + * current generation, either we'll wrap before hitting it, or it's + * in the past and has been handled already. + * + * If there's a wakeup before we wrap, arrange to be woken up by the + * page boundary following it. Keep the tail boundary if that's lower. + * + * head wakeup tail + * +----|---------------|-------|-------+ + * |$$$$|###############|%%%%%%%|$$$$$$$| + * +----|---------------|-------|-------+ + * trbe_base limit trbe_base + nr_pages + */ + if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) + limit = min(limit, round_up(wakeup, PAGE_SIZE)); + + /* + * There are two situation when this can happen i.e limit is before + * the head and hence TRBE cannot be configured. + * + * 1) head < tail (aligned down with PAGE_SIZE) and also they are both + * within the same PAGE size range. + * + * PAGE_SIZE + * |----------------------| + * + * limit head tail + * +------------|------|--------|-------+ + * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$| + * +------------|------|--------|-------+ + * trbe_base trbe_base + nr_pages + * + * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both + * head and wakeup are within same PAGE size range. 
+ * + * PAGE_SIZE + * |----------------------| + * + * limit head wakeup tail + * +----|------|-------|--------|-------+ + * |$$$$$$$$$$$|=======|========|$$$$$$$| + * +----|------|-------|--------|-------+ + * trbe_base trbe_base + nr_pages + */ + if (limit > head) + return limit; + + trbe_pad_buf(handle, handle->size); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + return 0; +} + +static unsigned long trbe_normal_offset(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = perf_get_aux(handle); + u64 limit = __trbe_normal_offset(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + /* + * If the head is too close to the limit and we don't + * have space for a meaningful run, we rather pad it + * and start fresh. + */ + if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) { + trbe_pad_buf(handle, limit - head); + limit = __trbe_normal_offset(handle); + } + return limit; +} + +static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + unsigned long offset; + + if (buf->snapshot) + offset = trbe_snapshot_offset(handle); + else + offset = trbe_normal_offset(handle); + return buf->trbe_base + offset; +} + +static void clr_trbe_status(void) +{ + u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); + + WARN_ON(is_trbe_enabled()); + trbsr &= ~TRBSR_IRQ; + trbsr &= ~TRBSR_TRG; + trbsr &= ~TRBSR_WRAP; + trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT); + trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT); + trbsr &= ~TRBSR_STOP; + write_sysreg_s(trbsr, SYS_TRBSR_EL1); +} + +static void set_trbe_limit_pointer_enabled(unsigned long addr) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT))); + WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); + + trblimitr &= ~TRBLIMITR_NVM; + trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT); + trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT); + trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); + + /* + * Fill trace buffer mode is used here while configuring the + * TRBE for trace capture. In this particular mode, the trace + * collection is stopped and a maintenance interrupt is raised + * when the current write pointer wraps. This pause in trace + * collection gives the software an opportunity to capture the + * trace data in the interrupt handler, before reconfiguring + * the TRBE. + */ + trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT; + + /* + * Trigger mode is not used here while configuring the TRBE for + * the trace capture. Hence just keep this in the ignore mode. + */ + trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) << + TRBLIMITR_TRIG_MODE_SHIFT; + trblimitr |= (addr & PAGE_MASK); + + trblimitr |= TRBLIMITR_ENABLE; + write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + + /* Synchronize the TRBE enable event */ + isb(); +} + +static void trbe_enable_hw(struct trbe_buf *buf) +{ + WARN_ON(buf->trbe_write < buf->trbe_base); + WARN_ON(buf->trbe_write >= buf->trbe_limit); + set_trbe_disabled(); + isb(); + clr_trbe_status(); + set_trbe_base_pointer(buf->trbe_base); + set_trbe_write_pointer(buf->trbe_write); + + /* + * Synchronize all the register updates + * till now before enabling the TRBE. 
+ */ + isb(); + set_trbe_limit_pointer_enabled(buf->trbe_limit); +} + +static enum trbe_fault_action trbe_get_fault_act(u64 trbsr) +{ + int ec = get_trbe_ec(trbsr); + int bsc = get_trbe_bsc(trbsr); + + WARN_ON(is_trbe_running(trbsr)); + if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr)) + return TRBE_FAULT_ACT_FATAL; + + if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT)) + return TRBE_FAULT_ACT_FATAL; + + if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) { + if (get_trbe_write_pointer() == get_trbe_base_pointer()) + return TRBE_FAULT_ACT_WRAP; + } + return TRBE_FAULT_ACT_SPURIOUS; +} + +static void *arm_trbe_alloc_buffer(struct coresight_device *csdev, + struct perf_event *event, void **pages, + int nr_pages, bool snapshot) +{ + struct trbe_buf *buf; + struct page **pglist; + int i; + + /* + * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with + * just a single page, there would not be any room left while writing + * into a partially filled TRBE buffer after the page size alignment. + * Hence restrict the minimum buffer size as two pages. + */ + if (nr_pages < 2) + return NULL; + + buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event)); + if (!buf) + return ERR_PTR(-ENOMEM); + + pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + kfree(buf); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < nr_pages; i++) + pglist[i] = virt_to_page(pages[i]); + + buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); + if (!buf->trbe_base) { + kfree(pglist); + kfree(buf); + return ERR_PTR(-ENOMEM); + } + buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE; + buf->trbe_write = buf->trbe_base; + buf->snapshot = snapshot; + buf->nr_pages = nr_pages; + buf->pages = pages; + kfree(pglist); + return buf; +} + +static void arm_trbe_free_buffer(void *config) +{ + struct trbe_buf *buf = config; + + vunmap((void *)buf->trbe_base); + kfree(buf); +} + +static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *config) +{ + struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev); + struct trbe_buf *buf = config; + enum trbe_fault_action act; + unsigned long size, offset; + unsigned long write, base, status; + unsigned long flags; + + WARN_ON(buf->cpudata != cpudata); + WARN_ON(cpudata->cpu != smp_processor_id()); + WARN_ON(cpudata->drvdata != drvdata); + if (cpudata->mode != CS_MODE_PERF) + return 0; + + perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW); + + /* + * We are about to disable the TRBE. And this could in turn + * fill up the buffer triggering, an IRQ. This could be consumed + * by the PE asynchronously, causing a race here against + * the IRQ handler in closing out the handle. So, let us + * make sure the IRQ can't trigger while we are collecting + * the buffer. We also make sure that a WRAP event is handled + * accordingly. + */ + local_irq_save(flags); + + /* + * If the TRBE was disabled due to lack of space in the AUX buffer or a + * spurious fault, the driver leaves it disabled, truncating the buffer. + * Since the etm_perf driver expects to close out the AUX buffer, the + * driver skips it. Thus, just pass in 0 size here to indicate that the + * buffer was truncated. 
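A worked example, with invented pointer values, of the size computation this update path finishes with once the TRBE has been drained and disabled:

#include <stdio.h>

int main(void)
{
	unsigned long base  = 0xffff800010000000UL;  /* made-up trbe_base   */
	unsigned long write = base + 0x3000;         /* TRBPTR after the run */
	unsigned long head  = 0x1123;                /* PERF_IDX2OFF(handle->head) */
	unsigned long offset = write - base;         /* bytes from buffer start */
	unsigned long size = offset > head ? offset - head : 0;

	printf("new trace bytes: %#lx\n", size);     /* 0x3000 - 0x1123 = 0x1edd */
	return 0;
}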
+ */ + if (!is_trbe_enabled()) { + size = 0; + goto done; + } + /* + * perf handle structure needs to be shared with the TRBE IRQ handler for + * capturing trace data and restarting the handle. There is a probability + * of an undefined reference based crash when etm event is being stopped + * while a TRBE IRQ also getting processed. This happens due the release + * of perf handle via perf_aux_output_end() in etm_event_stop(). Stopping + * the TRBE here will ensure that no IRQ could be generated when the perf + * handle gets freed in etm_event_stop(). + */ + trbe_drain_and_disable_local(); + write = get_trbe_write_pointer(); + base = get_trbe_base_pointer(); + + /* Check if there is a pending interrupt and handle it here */ + status = read_sysreg_s(SYS_TRBSR_EL1); + if (is_trbe_irq(status)) { + + /* + * Now that we are handling the IRQ here, clear the IRQ + * from the status, to let the irq handler know that it + * is taken care of. + */ + clr_trbe_irq(); + isb(); + + act = trbe_get_fault_act(status); + /* + * If this was not due to a WRAP event, we have some + * errors and as such buffer is empty. + */ + if (act != TRBE_FAULT_ACT_WRAP) { + size = 0; + goto done; + } + + /* + * Otherwise, the buffer is full and the write pointer + * has reached base. Adjust this back to the Limit pointer + * for correct size. Also, mark the buffer truncated. + */ + write = get_trbe_limit_pointer(); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + } + + offset = write - base; + if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf))) + size = 0; + else + size = offset - PERF_IDX2OFF(handle->head, buf); + +done: + local_irq_restore(flags); + + if (buf->snapshot) + handle->head += size; + return size; +} + +static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data) +{ + struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev); + struct perf_output_handle *handle = data; + struct trbe_buf *buf = etm_perf_sink_config(handle); + + WARN_ON(cpudata->cpu != smp_processor_id()); + WARN_ON(cpudata->drvdata != drvdata); + if (mode != CS_MODE_PERF) + return -EINVAL; + + *this_cpu_ptr(drvdata->handle) = handle; + cpudata->buf = buf; + cpudata->mode = mode; + buf->cpudata = cpudata; + buf->trbe_limit = compute_trbe_buffer_limit(handle); + buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf); + if (buf->trbe_limit == buf->trbe_base) { + trbe_stop_and_truncate_event(handle); + return 0; + } + trbe_enable_hw(buf); + return 0; +} + +static int arm_trbe_disable(struct coresight_device *csdev) +{ + struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev); + struct trbe_buf *buf = cpudata->buf; + + WARN_ON(buf->cpudata != cpudata); + WARN_ON(cpudata->cpu != smp_processor_id()); + WARN_ON(cpudata->drvdata != drvdata); + if (cpudata->mode != CS_MODE_PERF) + return -EINVAL; + + trbe_drain_and_disable_local(); + buf->cpudata = NULL; + cpudata->buf = NULL; + cpudata->mode = CS_MODE_DISABLED; + return 0; +} + +static void trbe_handle_spurious(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + + buf->trbe_limit = compute_trbe_buffer_limit(handle); + buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf); + if (buf->trbe_limit == buf->trbe_base) { + trbe_drain_and_disable_local(); + return; + } + trbe_enable_hw(buf); +} + +static void trbe_handle_overflow(struct perf_output_handle *handle) 
+{ + struct perf_event *event = handle->event; + struct trbe_buf *buf = etm_perf_sink_config(handle); + unsigned long offset, size; + struct etm_event_data *event_data; + + offset = get_trbe_limit_pointer() - get_trbe_base_pointer(); + size = offset - PERF_IDX2OFF(handle->head, buf); + if (buf->snapshot) + handle->head += size; + + /* + * Mark the buffer as truncated, as we have stopped the trace + * collection upon the WRAP event, without stopping the source. + */ + perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW | + PERF_AUX_FLAG_TRUNCATED); + perf_aux_output_end(handle, size); + event_data = perf_aux_output_begin(handle, event); + if (!event_data) { + /* + * We are unable to restart the trace collection, + * thus leave the TRBE disabled. The etm-perf driver + * is able to detect this with a disconnected handle + * (handle->event = NULL). + */ + trbe_drain_and_disable_local(); + *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL; + return; + } + buf->trbe_limit = compute_trbe_buffer_limit(handle); + buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf); + if (buf->trbe_limit == buf->trbe_base) { + trbe_stop_and_truncate_event(handle); + return; + } + *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle; + trbe_enable_hw(buf); +} + +static bool is_perf_trbe(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + struct trbe_cpudata *cpudata = buf->cpudata; + struct trbe_drvdata *drvdata = cpudata->drvdata; + int cpu = smp_processor_id(); + + WARN_ON(buf->trbe_base != get_trbe_base_pointer()); + WARN_ON(buf->trbe_limit != get_trbe_limit_pointer()); + + if (cpudata->mode != CS_MODE_PERF) + return false; + + if (cpudata->cpu != cpu) + return false; + + if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus)) + return false; + + return true; +} + +static irqreturn_t arm_trbe_irq_handler(int irq, void *dev) +{ + struct perf_output_handle **handle_ptr = dev; + struct perf_output_handle *handle = *handle_ptr; + enum trbe_fault_action act; + u64 status; + + /* + * Ensure the trace is visible to the CPUs and + * any external aborts have been resolved. + */ + trbe_drain_and_disable_local(); + + status = read_sysreg_s(SYS_TRBSR_EL1); + /* + * If the pending IRQ was handled by update_buffer callback + * we have nothing to do here. + */ + if (!is_trbe_irq(status)) + return IRQ_NONE; + + clr_trbe_irq(); + isb(); + + if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle)) + return IRQ_NONE; + + if (!is_perf_trbe(handle)) + return IRQ_NONE; + + /* + * Ensure perf callbacks have completed, which may disable + * the trace buffer in response to a TRUNCATION flag. 
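As a rough, self-contained model of the classification the handler performs next (the status fields here are placeholders, not the architectural TRBSR encoding, which lives in coresight-trbe.h):

#include <stdio.h>
#include <stdbool.h>

enum fault_action { ACT_WRAP, ACT_SPURIOUS, ACT_FATAL };

/* placeholder decode of the buffer status; field names are illustrative */
struct status {
	bool trg, abort, s1_abort, s2_abort;	/* fatal conditions */
	bool wrap, filled;			/* FILL-mode wrap event */
	bool wr_eq_base;			/* write pointer back at base */
};

static enum fault_action fault_act(const struct status *s)
{
	if (s->trg || s->abort || s->s1_abort || s->s2_abort)
		return ACT_FATAL;
	if (s->wrap && s->filled && s->wr_eq_base)
		return ACT_WRAP;	/* buffer full: collect and re-arm */
	return ACT_SPURIOUS;		/* nothing useful: reprogram and retry */
}

int main(void)
{
	struct status wrap = { .wrap = true, .filled = true, .wr_eq_base = true };
	printf("action: %d\n", fault_act(&wrap));	/* ACT_WRAP */
	return 0;
}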
+ */ + irq_work_run(); + + act = trbe_get_fault_act(status); + switch (act) { + case TRBE_FAULT_ACT_WRAP: + trbe_handle_overflow(handle); + break; + case TRBE_FAULT_ACT_SPURIOUS: + trbe_handle_spurious(handle); + break; + case TRBE_FAULT_ACT_FATAL: + trbe_stop_and_truncate_event(handle); + break; + } + return IRQ_HANDLED; +} + +static const struct coresight_ops_sink arm_trbe_sink_ops = { + .enable = arm_trbe_enable, + .disable = arm_trbe_disable, + .alloc_buffer = arm_trbe_alloc_buffer, + .free_buffer = arm_trbe_free_buffer, + .update_buffer = arm_trbe_update_buffer, +}; + +static const struct coresight_ops arm_trbe_cs_ops = { + .sink_ops = &arm_trbe_sink_ops, +}; + +static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct trbe_cpudata *cpudata = dev_get_drvdata(dev); + + return sprintf(buf, "%llx\n", cpudata->trbe_align); +} +static DEVICE_ATTR_RO(align); + +static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct trbe_cpudata *cpudata = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", cpudata->trbe_flag); +} +static DEVICE_ATTR_RO(flag); + +static struct attribute *arm_trbe_attrs[] = { + &dev_attr_align.attr, + &dev_attr_flag.attr, + NULL, +}; + +static const struct attribute_group arm_trbe_group = { + .attrs = arm_trbe_attrs, +}; + +static const struct attribute_group *arm_trbe_groups[] = { + &arm_trbe_group, + NULL, +}; + +static void arm_trbe_enable_cpu(void *info) +{ + struct trbe_drvdata *drvdata = info; + + trbe_reset_local(); + enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE); +} + +static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu) +{ + struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu); + struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu); + struct coresight_desc desc = { 0 }; + struct device *dev; + + if (WARN_ON(trbe_csdev)) + return; + + dev = &cpudata->drvdata->pdev->dev; + desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu); + if (!desc.name) + goto cpu_clear; + + desc.type = CORESIGHT_DEV_TYPE_SINK; + desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM; + desc.ops = &arm_trbe_cs_ops; + desc.pdata = dev_get_platdata(dev); + desc.groups = arm_trbe_groups; + desc.dev = dev; + trbe_csdev = coresight_register(&desc); + if (IS_ERR(trbe_csdev)) + goto cpu_clear; + + dev_set_drvdata(&trbe_csdev->dev, cpudata); + coresight_set_percpu_sink(cpu, trbe_csdev); + return; +cpu_clear: + cpumask_clear_cpu(cpu, &drvdata->supported_cpus); +} + +static void arm_trbe_probe_cpu(void *info) +{ + struct trbe_drvdata *drvdata = info; + int cpu = smp_processor_id(); + struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu); + u64 trbidr; + + if (WARN_ON(!cpudata)) + goto cpu_clear; + + if (!is_trbe_available()) { + pr_err("TRBE is not implemented on cpu %d\n", cpu); + goto cpu_clear; + } + + trbidr = read_sysreg_s(SYS_TRBIDR_EL1); + if (!is_trbe_programmable(trbidr)) { + pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu); + goto cpu_clear; + } + + cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr); + if (cpudata->trbe_align > SZ_2K) { + pr_err("Unsupported alignment on cpu %d\n", cpu); + goto cpu_clear; + } + cpudata->trbe_flag = get_trbe_flag_update(trbidr); + cpudata->cpu = cpu; + cpudata->drvdata = drvdata; + return; +cpu_clear: + cpumask_clear_cpu(cpu, &drvdata->supported_cpus); +} + +static void arm_trbe_remove_coresight_cpu(void *info) +{ + int cpu = smp_processor_id(); + struct 
+static void arm_trbe_remove_coresight_cpu(void *info)
+{
+	int cpu = smp_processor_id();
+	struct trbe_drvdata *drvdata = info;
+	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+
+	disable_percpu_irq(drvdata->irq);
+	trbe_reset_local();
+	if (trbe_csdev) {
+		coresight_unregister(trbe_csdev);
+		cpudata->drvdata = NULL;
+		coresight_set_percpu_sink(cpu, NULL);
+	}
+}
+
+static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
+{
+	int cpu;
+
+	drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
+	if (!drvdata->cpudata)
+		return -ENOMEM;
+
+	for_each_cpu(cpu, &drvdata->supported_cpus) {
+		smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+			arm_trbe_register_coresight_cpu(drvdata, cpu);
+		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+			smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
+	}
+	return 0;
+}
+
+static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
+{
+	int cpu;
+
+	for_each_cpu(cpu, &drvdata->supported_cpus)
+		smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
+	free_percpu(drvdata->cpudata);
+	return 0;
+}
+
+static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+		/*
+		 * If this CPU was not probed for TRBE,
+		 * initialize it now.
+		 */
+		if (!coresight_get_percpu_sink(cpu)) {
+			arm_trbe_probe_cpu(drvdata);
+			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+				arm_trbe_register_coresight_cpu(drvdata, cpu);
+			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+				arm_trbe_enable_cpu(drvdata);
+		} else {
+			arm_trbe_enable_cpu(drvdata);
+		}
+	}
+	return 0;
+}
+
+static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+		disable_percpu_irq(drvdata->irq);
+		trbe_reset_local();
+	}
+	return 0;
+}
+
+static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
+{
+	enum cpuhp_state trbe_online;
+	int ret;
+
+	trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+					      arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
+	if (trbe_online < 0)
+		return trbe_online;
+
+	ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
+	if (ret) {
+		cpuhp_remove_multi_state(trbe_online);
+		return ret;
+	}
+	drvdata->trbe_online = trbe_online;
+	return 0;
+}
+
+static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
+{
+	cpuhp_remove_multi_state(drvdata->trbe_online);
+}
+
+static int arm_trbe_probe_irq(struct platform_device *pdev,
+			      struct trbe_drvdata *drvdata)
+{
+	int ret;
+
+	drvdata->irq = platform_get_irq(pdev, 0);
+	if (drvdata->irq < 0) {
+		pr_err("IRQ not found for the platform device\n");
+		return drvdata->irq;
+	}
+
+	if (!irq_is_percpu(drvdata->irq)) {
+		pr_err("IRQ is not a PPI\n");
+		return -EINVAL;
+	}
+
+	if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
+		return -EINVAL;
+
+	drvdata->handle = alloc_percpu(struct perf_output_handle *);
+	if (!drvdata->handle)
+		return -ENOMEM;
+
+	ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
+	if (ret) {
+		free_percpu(drvdata->handle);
+		return ret;
+	}
+	return 0;
+}
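+
+/*
+ * Note on the PPI wiring above: request_percpu_irq() registers the
+ * handler once with a percpu dev_id (drvdata->handle), but each CPU
+ * still has to unmask its own copy with enable_percpu_irq(). That is
+ * why arm_trbe_enable_cpu() is run on every supported CPU:
+ *
+ *	smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
+ */
+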
+static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
+{
+	free_percpu_irq(drvdata->irq, drvdata->handle);
+	free_percpu(drvdata->handle);
+}
+
+static int arm_trbe_device_probe(struct platform_device *pdev)
+{
+	struct coresight_platform_data *pdata;
+	struct trbe_drvdata *drvdata;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	pdata = coresight_get_platform_data(dev);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+
+	dev_set_drvdata(dev, drvdata);
+	dev->platform_data = pdata;
+	drvdata->pdev = pdev;
+	ret = arm_trbe_probe_irq(pdev, drvdata);
+	if (ret)
+		return ret;
+
+	ret = arm_trbe_probe_coresight(drvdata);
+	if (ret)
+		goto probe_failed;
+
+	ret = arm_trbe_probe_cpuhp(drvdata);
+	if (ret)
+		goto cpuhp_failed;
+
+	return 0;
+cpuhp_failed:
+	arm_trbe_remove_coresight(drvdata);
+probe_failed:
+	arm_trbe_remove_irq(drvdata);
+	return ret;
+}
+
+static int arm_trbe_device_remove(struct platform_device *pdev)
+{
+	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	arm_trbe_remove_cpuhp(drvdata);
+	arm_trbe_remove_coresight(drvdata);
+	arm_trbe_remove_irq(drvdata);
+	return 0;
+}
+
+static const struct of_device_id arm_trbe_of_match[] = {
+	{ .compatible = "arm,trace-buffer-extension"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
+
+static struct platform_driver arm_trbe_driver = {
+	.driver = {
+		.name = DRVNAME,
+		.of_match_table = of_match_ptr(arm_trbe_of_match),
+		.suppress_bind_attrs = true,
+	},
+	.probe = arm_trbe_device_probe,
+	.remove = arm_trbe_device_remove,
+};
+
+static int __init arm_trbe_init(void)
+{
+	int ret;
+
+	if (arm64_kernel_unmapped_at_el0()) {
+		pr_err("TRBE can't be used when the kernel is unmapped at EL0\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = platform_driver_register(&arm_trbe_driver);
+	if (!ret)
+		return 0;
+
+	pr_err("Error registering %s platform driver\n", DRVNAME);
+	return ret;
+}
+
+static void __exit arm_trbe_exit(void)
+{
+	platform_driver_unregister(&arm_trbe_driver);
+}
+module_init(arm_trbe_init);
+module_exit(arm_trbe_exit);
+
+MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
+MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
+MODULE_LICENSE("GPL v2");
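The header added next only provides register accessors. For orientation, a hypothetical sketch of how they combine when arming the buffer; the driver's real sequence lives in trbe_enable_hw(), which is outside the hunks shown here, so the helper below is illustrative only:

	/* Hypothetical helper, not the driver's actual enable path. */
	static void example_arm_trbe(unsigned long base, unsigned long head)
	{
		WARN_ON(!IS_ALIGNED(base, PAGE_SIZE));
		set_trbe_base_pointer(base);		/* TRBBASER_EL1 */
		set_trbe_write_pointer(base + head);	/* TRBPTR_EL1: next byte to write */
		/*
		 * TRBLIMITR_EL1 is programmed last: the limit page in bits [63:12]
		 * plus fill/trigger mode bits and the enable bit, followed by isb().
		 */
	}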
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
new file mode 100644
index 000000000000..abf3e36082f0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This contains the hardware-related helper functions needed by the
+ * Trace Buffer Extension (TRBE) driver in the CoreSight framework.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "coresight-etm-perf.h"
+
+static inline bool is_trbe_available(void)
+{
+	u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+	unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+
+	return trbe >= 0b0001;
+}
+
+static inline bool is_trbe_enabled(void)
+{
+	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+	return trblimitr & TRBLIMITR_ENABLE;
+}
+
+#define TRBE_EC_OTHERS		0
+#define TRBE_EC_STAGE1_ABORT	36
+#define TRBE_EC_STAGE2_ABORT	37
+
+static inline int get_trbe_ec(u64 trbsr)
+{
+	return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
+}
+
+#define TRBE_BSC_NOT_STOPPED	0
+#define TRBE_BSC_FILLED		1
+#define TRBE_BSC_TRIGGERED	2
+
+static inline int get_trbe_bsc(u64 trbsr)
+{
+	return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
+}
+
+static inline void clr_trbe_irq(void)
+{
+	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+	trbsr &= ~TRBSR_IRQ;
+	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static inline bool is_trbe_irq(u64 trbsr)
+{
+	return trbsr & TRBSR_IRQ;
+}
+
+static inline bool is_trbe_trg(u64 trbsr)
+{
+	return trbsr & TRBSR_TRG;
+}
+
+static inline bool is_trbe_wrap(u64 trbsr)
+{
+	return trbsr & TRBSR_WRAP;
+}
+
+static inline bool is_trbe_abort(u64 trbsr)
+{
+	return trbsr & TRBSR_ABORT;
+}
+
+static inline bool is_trbe_running(u64 trbsr)
+{
+	return !(trbsr & TRBSR_STOP);
+}
+
+#define TRBE_TRIG_MODE_STOP		0
+#define TRBE_TRIG_MODE_IRQ		1
+#define TRBE_TRIG_MODE_IGNORE		3
+
+#define TRBE_FILL_MODE_FILL		0
+#define TRBE_FILL_MODE_WRAP		1
+#define TRBE_FILL_MODE_CIRCULAR_BUFFER	3
+
+static inline void set_trbe_disabled(void)
+{
+	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+	trblimitr &= ~TRBLIMITR_ENABLE;
+	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+}
+
+static inline bool get_trbe_flag_update(u64 trbidr)
+{
+	return trbidr & TRBIDR_FLAG;
+}
+
+static inline bool is_trbe_programmable(u64 trbidr)
+{
+	return !(trbidr & TRBIDR_PROG);
+}
+
+static inline int get_trbe_address_align(u64 trbidr)
+{
+	return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
+}
+
+static inline unsigned long get_trbe_write_pointer(void)
+{
+	return read_sysreg_s(SYS_TRBPTR_EL1);
+}
+
+static inline void set_trbe_write_pointer(unsigned long addr)
+{
+	WARN_ON(is_trbe_enabled());
+	write_sysreg_s(addr, SYS_TRBPTR_EL1);
+}
+
+static inline unsigned long get_trbe_limit_pointer(void)
+{
+	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+	unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+	return addr;
+}
+
+static inline unsigned long get_trbe_base_pointer(void)
+{
+	u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
+	unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);
+
+	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+	return addr;
+}
+
+static inline void set_trbe_base_pointer(unsigned long addr)
+{
+	WARN_ON(is_trbe_enabled());
+	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
+	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+	write_sysreg_s(addr, SYS_TRBBASER_EL1);
+}
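A quick illustration of the limit-pointer masking used by get_trbe_limit_pointer() above: TRBLIMITR_EL1 carries the LIMIT address in bits [63:12] with mode and enable bits below, so clearing the low 12 bits yields the page-aligned limit. (Standalone sketch; the register value is made up.)

	#include <stdio.h>

	int main(void)
	{
		unsigned long long trblimitr = 0x42355001ULL;	  /* hypothetical: E bit set */
		unsigned long long limit = trblimitr & ~0xfffULL; /* LIMIT, bits [63:12] */

		printf("limit pointer = %#llx\n", limit);	  /* prints 0x42355000 */
		return 0;
	}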
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index c9ac3dc65113..24d0c974bfd5 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -844,7 +844,7 @@ static irqreturn_t intel_th_irq(int irq, void *data)
  * @irq:	irq number
  */
 struct intel_th *
-intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
 	       struct resource *devres, unsigned int ndevres)
 {
 	int err, r, nr_mmios = 0;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index f72803a02391..28509b02a0b5 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
 	output->active = false;
 
 	for_each_set_bit(master, gth->output[output->port].master,
-			 TH_CONFIGURABLE_MASTERS) {
+			 TH_CONFIGURABLE_MASTERS + 1) {
 		gth_master_set(gth, master, -1);
 	}
 	spin_unlock(&gth->gth_lock);
@@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
 	othdev->output.port = -1;
 	othdev->output.active = false;
 	gth->output[port].output = NULL;
-	for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+	for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
 		if (gth->master[master] == port)
 			gth->master[master] = -1;
 	spin_unlock(&gth->gth_lock);
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 5fe694708b7a..89c67e0e1d34 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -74,7 +74,7 @@ struct intel_th_drvdata {
  */
 struct intel_th_device {
 	struct device		dev;
-	struct intel_th_drvdata	*drvdata;
+	const struct intel_th_drvdata	*drvdata;
 	struct resource		*resource;
 	unsigned int		num_resources;
 	unsigned int		type;
@@ -178,7 +178,7 @@ struct intel_th_driver {
 	/* file_operations for those who want a device node */
 	const struct file_operations *fops;
 	/* optional attributes */
-	struct attribute_group *attr_group;
+	const struct attribute_group *attr_group;
 
 	/* source ops */
 	int	(*set_output)(struct intel_th_device *thdev,
@@ -224,7 +224,7 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
 }
 
 struct intel_th *
-intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
 	       struct resource *devres, unsigned int ndevres);
 void intel_th_free(struct intel_th *th);
 
@@ -272,7 +272,7 @@ struct intel_th {
 	struct intel_th_device	*thdev[TH_SUBDEVICE_MAX];
 	struct intel_th_device	*hub;
-	struct intel_th_drvdata	*drvdata;
+	const struct intel_th_drvdata	*drvdata;
 
 	struct resource		resource[TH_MMIO_END];
 	int			(*activate)(struct intel_th *);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 7d95242db900..2edc4666633d 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -2095,7 +2095,7 @@ static struct attribute *msc_output_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group msc_output_group = {
+static const struct attribute_group msc_output_group = {
 	.attrs = msc_output_attrs,
 };
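Constifying intel_th_drvdata (above) works because the PCI layer only round-trips the pointer through the id table's driver_data field, as intel_th_pci_probe() below does. A minimal self-contained sketch of that pattern; all names and the device ID are made up:

	#include <linux/pci.h>

	struct example_drvdata {
		unsigned int host_mode_only;
	};

	static const struct example_drvdata example_2x = { .host_mode_only = 0 };

	static const struct pci_device_id example_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xffff),	/* made-up device ID */
		  .driver_data = (kernel_ulong_t)&example_2x, },
		{ 0 },
	};

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		/* Casting back through driver_data is why the pointee can be const: */
		const struct example_drvdata *drvdata = (void *)id->driver_data;

		return drvdata->host_mode_only ? -ENODEV : 0;
	}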
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 251e75c9ba9d..7da4f298ed01 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -71,7 +71,7 @@ static void intel_th_pci_deactivate(struct intel_th *th)
 static int intel_th_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *id)
 {
-	struct intel_th_drvdata *drvdata = (void *)id->driver_data;
+	const struct intel_th_drvdata *drvdata = (void *)id->driver_data;
 	struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
 		[TH_MMIO_CONFIG]	= pdev->resource[TH_PCI_CONFIG_BAR],
 		[TH_MMIO_SW]		= pdev->resource[TH_PCI_STH_SW_BAR],
@@ -274,10 +274,20 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
 	{
+		/* Alder Lake-M */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
+	{
 		/* Alder Lake CPU */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Rocket Lake CPU */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{ 0 },
 };
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 0da6b787f553..09132ab8bc23 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -142,7 +142,7 @@ static struct attribute *pti_output_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group pti_output_group = {
+static const struct attribute_group pti_output_group = {
 	.attrs = pti_output_attrs,
 };
@@ -295,7 +295,7 @@ static struct attribute *lpp_output_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group lpp_output_group = {
+static const struct attribute_group lpp_output_group = {
 	.attrs = lpp_output_attrs,
 };
diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c
index 360b5c03df95..8254971c02e7 100644
--- a/drivers/hwtracing/stm/p_sys-t.c
+++ b/drivers/hwtracing/stm/p_sys-t.c
@@ -92,7 +92,7 @@ static void sys_t_policy_node_init(void *priv)
 {
 	struct sys_t_policy_node *pn = priv;
 
-	generate_random_uuid(pn->uuid.b);
+	uuid_gen(&pn->uuid);
 }
 
 static int sys_t_output_open(void *priv, struct stm_output *output)
@@ -292,6 +292,7 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
 	unsigned int m = output->master;
 	const unsigned char nil = 0;
 	u32 header = DATA_HEADER;
+	u8 uuid[UUID_SIZE];
 	ssize_t sz;
 
 	/* We require an existing policy node to proceed */
@@ -322,7 +323,8 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
 		return sz;
 
 	/* GUID */
-	sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE);
+	export_uuid(uuid, &op->node.uuid);
+	sz = stm_data_write(data, m, c, false, uuid, sizeof(op->node.uuid));
 	if (sz <= 0)
 		return sz;
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 603b4a9969d3..42103c3a177f 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -57,11 +57,6 @@ void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
 	*cend = policy_node->last_channel;
 }
 
-static inline char *stp_policy_node_name(struct stp_policy_node *policy_node)
-{
-	return policy_node->group.cg_item.ci_name ? : "<none>";
-}
-
 static inline struct stp_policy *to_stp_policy(struct config_item *item)
 {
 	return item ?