/*
 * An ETM context for a running event includes the perf aux handle
 * and aux_data. For ETM, the aux_data (etm_event_data), consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler. And if the "sink" encounters a failure
 * to "begin" another session (e.g due to lack of space in the buffer),
 * the handle will be cleared. Thus, the event_data may not be accessible
 * from the handle when we get to the etm_event_stop(), which is required
 * for stopping the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active on
 * the ETM. Thus the event_data for the session must be part of the ETM
 * context to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;	/* perf AUX ring-buffer handle */
	struct etm_event_data *event_data;	/* session data; non-NULL while tracing */
};
/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply on all ETMs.
 */
PMU_FORMAT_ATTR(branch_broadcast, "config:"__stringify(ETM_OPT_BRANCH_BROADCAST));
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* preset - if sink ID is used as a configuration selector */
PMU_FORMAT_ATTR(preset, "config:0-3");
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* config ID - set if a system configuration is selected */
PMU_FORMAT_ATTR(configid, "config2:32-63");
PMU_FORMAT_ATTR(cc_threshold, "config3:0-11");
/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;
	/* NOTE(review): the rest of this function's body is missing from this chunk */
/* NOTE(review): headerless fragment — the enclosing function is not visible here. */
	/*
	 * Mark perf event as done for trace id allocator, but don't call
	 * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
	 * never free trace IDs to ensure that the ID associated with a CPU
	 * cannot change during their and other's concurrent sessions. Instead,
	 * a refcount is used so that the last event to call
	 * coresight_trace_id_perf_stop() frees all IDs.
	 */
	coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
/* NOTE(review): headerless fragment — presumably from alloc_event_data(); confirm against upstream. */
	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct coresight_path *);
	if (!event_data->path) {
		/* event_data was heap-allocated above (not visible here); release it */
		kfree(event_data);
		return NULL;
	}
/* * Check if two given sinks are compatible with each other, * so that they can use the same sink buffers, when an event * moves around.
*/ staticbool sinks_compatible(struct coresight_device *a, struct coresight_device *b)
{ if (!a || !b) returnfalse; /* * If the sinks are of the same subtype and driven * by the same driver, we can use the same buffer * on these sinks.
*/ return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
(sink_ops(a) == sink_ops(b));
}
/* NOTE(review): fragment of etm_setup_aux() — header and local declarations not visible here. */
	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	/* Deferred teardown: free_event_data() runs from a workqueue */
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* check if user wants a coresight configuration selected */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		/* Activation can fail (e.g. unknown hash) — abort setup */
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
/* NOTE(review): etm_setup_aux() per-CPU loop — the loop's closing brace is missing from this chunk. */
	for_each_cpu(cpu, mask) {
		struct coresight_path *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * If AUX pause feature is enabled but the ETM driver does not
		 * support the operations, clear this CPU from the mask and
		 * continue to next one.
		 */
		if (event->attr.aux_start_paused &&
		    (!source_ops(csdev)->pause_perf || !source_ops(csdev)->resume_perf)) {
			dev_err_once(&csdev->dev, "AUX pause is not supported.\n");
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for all the ETMs,
		 * where this event can be scheduled.
		 * We allocate the sink specific buffers only once for this
		 * event. If the ETMs have different default sink devices, we
		 * can only use a single "type" of sink as the event can carry
		 * only one sink specific buffer. Thus we have to make sure
		 * that the sinks are of the same type and driven by the same
		 * driver, as the one we allocate the buffer for. As such
		 * we choose the first sink and check if the remaining ETMs
		 * have a compatible default sink. We don't trace on a CPU
		 * if the sink is not compatible.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check if this sink compatible with the last sink */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/* ensure we can allocate a trace ID for this CPU */
		coresight_path_assign_trace_id(path, CS_MODE_PERF);
		if (!IS_VALID_CS_TRACE_ID(path->trace_id)) {
			cpumask_clear_cpu(cpu, mask);
			/* Drop the just-built path before skipping this CPU */
			coresight_release_path(path);
			continue;
		}
/* NOTE(review): etm_setup_aux() tail fragment — post-loop validation and sink buffer allocation. */
	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	/* Both callbacks are required to manage the AUX buffer lifecycle */
	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are ensured to be of the
	 * same type. Thus the same sink configuration is used by the
	 * sinks.
	 */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;
/* NOTE(review): torso of etm_event_start() — function header and local declarations not visible. */
	/* Resuming from an AUX pause rather than a fresh start */
	if (flags & PERF_EF_RESUME) {
		if (etm_event_resume(csdev, ctxt) < 0) {
			dev_err(&csdev->dev, "Failed to resume ETM event.\n");
			goto fail;
		}
		return;
	}

	/* Have we messed up our tracking ? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 *
	 * As such we pretend that everything is fine, and let
	 * it continue without actually tracing. The event could
	 * continue tracing when it moves to a CPU where it is
	 * reachable to a sink.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto out;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF, path))
		goto fail_disable_path;

	/*
	 * output cpu / trace ID in perf record, once for the lifetime
	 * of the event.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
		cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
		/* NOTE(review): the body that emits the hw id record is missing from this chunk */

out:
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	/*
	 * Check if the handle is still associated with the event,
	 * to handle cases where if the sink failed to start the
	 * trace and TRUNCATED the handle already.
	 */
	if (READ_ONCE(handle->event)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		perf_aux_output_end(handle, 0);
	}
fail:
	event->hw.state = PERF_HES_STOPPED;
	return;
}
/* NOTE(review): headerless fragment — looks like part of the AUX pause path; the enclosing function and its tail are missing from this chunk. */
	path = etm_event_cpu_path(ctxt->event_data, cpu);
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		return;

	/*
	 * The per CPU sink has own interrupt handling, it might have
	 * race condition with updating buffer on AUX trace pause if
	 * it is invoked from NMI. To avoid the race condition,
	 * disallows updating buffer for the per CPU sink case.
	 */
	if (coresight_is_percpu_sink(sink))
		return;

	if (WARN_ON_ONCE(handle->event != event))
		return;

	if (!sink_ops(sink)->update_buffer)
		return;

	size = sink_ops(sink)->update_buffer(sink, handle,
					     ctxt->event_data->snk_config);
	if (READ_ONCE(handle->event)) {
		if (!size)
			return;
/* NOTE(review): torso of etm_event_stop() — function header and local declarations not visible. */
	/* A pause request is delegated entirely to the pause path */
	if (mode & PERF_EF_PAUSE)
		return etm_event_pause(event, csdev, ctxt);

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	/*
	 * Check if this ETM was allowed to trace, as decided at
	 * etm_setup_aux(). If it wasn't allowed to trace, then
	 * nothing needs to be torn down other than outputting a
	 * zero sized record.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE) &&
	    !cpumask_test_cpu(cpu, &event_data->mask)) {
		event->hw.state = PERF_HES_STOPPED;
		perf_aux_output_end(handle, 0);
		return;
	}

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is not bound to an event anymore
	 * (e.g, the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		/*
		 * Make sure the handle is still valid as the
		 * sink could have closed it from an IRQ.
		 * The sink driver must handle the race with
		 * update_buffer() and IRQ. Thus either we
		 * should get a valid handle and valid size
		 * (which may be 0).
		 *
		 * But we should never get a non-zero size with
		 * an invalid handle.
		 */
		if (READ_ONCE(handle->event))
			perf_aux_output_end(handle, size);
		else
			WARN_ON(size);
	}

	/* Disabling the path make its elements available to other sessions */
	coresight_disable_path(path);
}
/*
 * etm_event_add - perf core "add" callback for the ETM PMU.
 *
 * Schedule the event on this CPU. When PERF_EF_START is set, start
 * tracing immediately; etm_event_start() reports failure by leaving
 * PERF_HES_STOPPED set in hwc->state, which we turn into -EINVAL.
 * Otherwise just mark the event stopped, waiting for ->start().
 */
static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}
/*
 * etm_event_del - perf core "del" callback for the ETM PMU.
 *
 * Unschedule the event: always stop tracing and request a final
 * AUX buffer update (PERF_EF_UPDATE). The @mode argument from the
 * perf core is intentionally ignored.
 */
static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}
/*
 * etm_addr_filters_validate - validate perf address filters for the ETM.
 *
 * Reject filter sets that exceed the hardware comparator budget
 * (ETM_ADDR_CMP_MAX), use START/STOP actions on range filters, or mix
 * range filters with single-address (start/stop) filters, which the
 * hardware cannot do simultaneously.
 *
 * Returns 0 when the filter list is acceptable, -EOPNOTSUPP otherwise.
 */
static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size==0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else {
			address = true;
		}

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to cohabitate, they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}
/* NOTE(review): headerless fragments of two symlink helpers — attribute allocation (presumably etm_perf_add_symlink_group) followed by the tail of etm_perf_add_symlink_cscfg; confirm against upstream. */
	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);

	/*
	 * If this function is called adding a sink then the hash is used for
	 * sink selection - see function coresight_get_sink_by_id().
	 * If adding a configuration then the hash is used for selection in
	 * cscfg_activate_config()
	 */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);

	/* devm-managed copy: freed automatically with the device */
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

/* NOTE(review): discontinuity — the following lines belong to a different function's tail. */
	/* set the show function to the custom cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}
/* Remove the sysfs symlink for a coresight configuration; no-op if it was never created. */
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;
	/* NOTE(review): the rest of this function's body is missing from this chunk */
/*
 * NOTE(review): residual web-page boilerplate (German disclaimer) left over
 * from extraction — not part of the kernel source. Preserved here, translated
 * and commented out so the file remains valid C:
 * "The information on this web page was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor quality
 * of the provided information is guaranteed. Note: the colored syntax
 * rendering and the measurement are still experimental."
 */