if (pmu) {
term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus)
? cpu_map__online()
: perf_cpu_map__get(pmu->cpus);
} else {
term_cpus = perf_cpu_map__new(term->val.str); if (!term_cpus && fake_pmu) { /* * Assume the PMU string makes sense on a different * machine and fake a value with all online CPUs.
*/
term_cpus = cpu_map__online();
}
}
}
perf_cpu_map__merge(&cpus, term_cpus);
perf_cpu_map__put(term_cpus);
}
return cpus;
}
/** * fix_raw - For each raw term see if there is an event (aka alias) in pmu that * matches the raw's string value. If the string value matches an * event then change the term to be an event, if not then change it to * be a config term. For example, "read" may be an event of the PMU or * a raw hex encoding of 0xead. The fix-up is done late so the PMU of * the event can be determined and we don't need to scan all PMUs * ahead-of-time. * @config_terms: the list of terms that may contain a raw term. * @pmu: the PMU to scan for events from.
*/ staticvoid fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{ struct parse_events_term *term;
/* * Ensure the first_wildcard_match's PMU matches that of the new event * being added. Otherwise try to match with another event further down * the evlist.
*/ if (first_wildcard_match) { struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
/** * parse_aliases - search names for entries beginning or equalling str ignoring * case. If mutliple entries in names match str then the longest * is chosen. * @str: The needle to look for. * @names: The haystack to search. * @size: The size of the haystack. * @longest: Out argument giving the length of the matching entry.
*/ staticint parse_aliases(constchar *str, constchar *const names[][EVSEL__MAX_ALIASES], int size, int *longest)
{
*longest = -1; for (int i = 0; i < size; i++) { for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { int n = strlen(names[i][j]);
if (n > *longest && !strncasecmp(str, names[i][j], n))
*longest = n;
} if (*longest > 0) return i;
}
/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 * @name: the event name to decode.
 * @extended_pmu_type: PMU type placed in the extended config bits if supported.
 * @config: out argument receiving the composed attr.config value.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;

	str += len + 1;
	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	/*
	 * Compose attr.config per the perf_event_open(2) PERF_TYPE_HW_CACHE
	 * encoding: (id) | (op << 8) | (result << 16), with the PMU type in
	 * the extended bits when the kernel supports extended types.
	 */
	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}
/** * parse_events__filter_pmu - returns false if a wildcard PMU should be * considered, true if it should be filtered.
*/ bool parse_events__filter_pmu(conststruct parse_events_state *parse_state, conststruct perf_pmu *pmu)
{ if (parse_state->pmu_filter == NULL) returnfalse;
if (parse_events__filter_pmu(parse_state, pmu)) continue;
if (perf_pmu__have_event(pmu, name)) { /* * The PMU has the event so add as not a legacy cache * event.
*/
ret = parse_events_add_pmu(parse_state, list, pmu,
parsed_terms,
first_wildcard_match, /*alternate_hw_config=*/PERF_COUNT_HW_MAX); if (ret) goto out_err; if (first_wildcard_match == NULL)
first_wildcard_match =
container_of(list->prev, struct evsel, core.node); continue;
}
if (!pmu->is_core) { /* Legacy cache events are only supported by core PMUs. */ continue;
}
if (parse_breakpoint_type(type, &attr)) return -EINVAL;
/* Provide some defaults if len is not specified */ if (!len) { if (attr.bp_type == HW_BREAKPOINT_X)
len = default_breakpoint_len(); else
len = HW_BREAKPOINT_LEN_4;
}
if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
parse_events_error__handle(err, -1,
strdup("Invalid term_type"), NULL); returnfalse;
} if (!config_term_shrinked) returntrue;
switch (term_type) { case PARSE_EVENTS__TERM_TYPE_CONFIG: case PARSE_EVENTS__TERM_TYPE_CONFIG1: case PARSE_EVENTS__TERM_TYPE_CONFIG2: case PARSE_EVENTS__TERM_TYPE_CONFIG3: case PARSE_EVENTS__TERM_TYPE_NAME: case PARSE_EVENTS__TERM_TYPE_METRIC_ID: case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: case PARSE_EVENTS__TERM_TYPE_PERCORE: case PARSE_EVENTS__TERM_TYPE_CPU: returntrue; case PARSE_EVENTS__TERM_TYPE_USER: case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: case PARSE_EVENTS__TERM_TYPE_TIME: case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: case PARSE_EVENTS__TERM_TYPE_STACKSIZE: case PARSE_EVENTS__TERM_TYPE_NOINHERIT: case PARSE_EVENTS__TERM_TYPE_INHERIT: case PARSE_EVENTS__TERM_TYPE_MAX_STACK: case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: case PARSE_EVENTS__TERM_TYPE_OVERWRITE: case PARSE_EVENTS__TERM_TYPE_DRV_CFG: case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: case PARSE_EVENTS__TERM_TYPE_RAW: case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: case PARSE_EVENTS__TERM_TYPE_HARDWARE: default: if (!err) returnfalse;
/* term_type is validated so indexing is safe */ if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
parse_events__term_type_str(term_type)) >= 0)
parse_events_error__handle(err, -1, err_str, NULL); returnfalse;
}
}
staticint config_term_common(struct perf_event_attr *attr, struct parse_events_term *term, struct parse_events_state *parse_state)
{ #define CHECK_TYPE_VAL(type) \ do { \ if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \ return -EINVAL; \
} while (0)
switch (term->type_term) { case PARSE_EVENTS__TERM_TYPE_CONFIG:
CHECK_TYPE_VAL(NUM);
attr->config = term->val.num; break; case PARSE_EVENTS__TERM_TYPE_CONFIG1:
CHECK_TYPE_VAL(NUM);
attr->config1 = term->val.num; break; case PARSE_EVENTS__TERM_TYPE_CONFIG2:
CHECK_TYPE_VAL(NUM);
attr->config2 = term->val.num; break; case PARSE_EVENTS__TERM_TYPE_CONFIG3:
CHECK_TYPE_VAL(NUM);
attr->config3 = term->val.num; break; case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
CHECK_TYPE_VAL(STR); if (strcmp(term->val.str, "no") &&
parse_branch_str(term->val.str,
&attr->branch_sample_type)) {
parse_events_error__handle(parse_state->error, term->err_val,
strdup("invalid branch sample type"),
NULL); return -EINVAL;
} break; case PARSE_EVENTS__TERM_TYPE_TIME:
CHECK_TYPE_VAL(NUM); if (term->val.num > 1) {
parse_events_error__handle(parse_state->error, term->err_val,
strdup("expected 0 or 1"),
NULL); return -EINVAL;
} break; case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
CHECK_TYPE_VAL(STR); break; case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_INHERIT:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_NAME:
CHECK_TYPE_VAL(STR); break; case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
CHECK_TYPE_VAL(STR); break; case PARSE_EVENTS__TERM_TYPE_RAW:
CHECK_TYPE_VAL(STR); break; case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_PERCORE:
CHECK_TYPE_VAL(NUM); if ((unsignedint)term->val.num > 1) {
parse_events_error__handle(parse_state->error, term->err_val,
strdup("expected 0 or 1"),
NULL); return -EINVAL;
} break; case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
CHECK_TYPE_VAL(STR); break; case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
CHECK_TYPE_VAL(NUM); if (term->val.num > UINT_MAX) {
parse_events_error__handle(parse_state->error, term->err_val,
strdup("too big"),
NULL); return -EINVAL;
} break; case PARSE_EVENTS__TERM_TYPE_CPU: { struct perf_cpu_map *map;
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
parse_events_error__handle(parse_state->error, term->err_val,
strdup("too big"), /*help=*/NULL); return -EINVAL;
} break;
}
assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR); if (perf_pmus__find(term->val.str) != NULL) break;
map = perf_cpu_map__new(term->val.str); if (!map && !parse_state->fake_pmu) {
parse_events_error__handle(parse_state->error, term->err_val,
strdup("not a valid PMU or CPU number"), /*help=*/NULL); return -EINVAL;
}
perf_cpu_map__put(map); break;
} case PARSE_EVENTS__TERM_TYPE_DRV_CFG: case PARSE_EVENTS__TERM_TYPE_USER: case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: case PARSE_EVENTS__TERM_TYPE_HARDWARE: default:
parse_events_error__handle(parse_state->error, term->err_term,
strdup(parse_events__term_type_str(term->type_term)),
parse_events_formats_error_string(NULL)); return -EINVAL;
}
/* * Check term availability after basic checking so * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. * * If check availability at the entry of this function, * user will see "'<sysfs term>' is not usable in 'perf stat'" * if an invalid config term is provided for legacy events * (for example, instructions/badterm/...), which is confusing.
*/ if (!config_term_avail(term->type_term, parse_state->error)) return -EINVAL; return 0; #undef CHECK_TYPE_VAL
}
if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
parse_events_error__handle(parse_state->error, term->err_term,
err_str, /*help=*/NULL); return -EINVAL;
} /* * Rewrite the PMU event to a legacy cache one unless the PMU * doesn't support legacy cache events or the event is present * within the PMU.
*/ if (perf_pmu__supports_legacy_cache(pmu) &&
!perf_pmu__have_event(pmu, term->config)) {
attr->type = PERF_TYPE_HW_CACHE; return parse_events__decode_legacy_cache(term->config, pmu->type,
&attr->config);
} else {
term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
term->no_value = true;
}
} if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) { struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
if (!pmu) { char *err_str;
if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
parse_events_error__handle(parse_state->error, term->err_term,
err_str, /*help=*/NULL); return -EINVAL;
} /* * If the PMU has a sysfs or json event prefer it over * legacy. ARM requires this.
*/ if (perf_pmu__have_event(pmu, term->config)) {
term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
term->no_value = true;
term->alternate_hw_config = true;
} else {
attr->type = PERF_TYPE_HARDWARE;
attr->config = term->val.num; if (perf_pmus__supports_extended_type())
attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
} return 0;
} if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) { /* * Always succeed for sysfs terms, as we dont know * at this point what type they need to have.
*/ return 0;
} return config_term_common(attr, term, parse_state);
}
staticint config_term_tracepoint(struct perf_event_attr *attr, struct parse_events_term *term, struct parse_events_state *parse_state)
{ switch (term->type_term) { case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: case PARSE_EVENTS__TERM_TYPE_STACKSIZE: case PARSE_EVENTS__TERM_TYPE_INHERIT: case PARSE_EVENTS__TERM_TYPE_NOINHERIT: case PARSE_EVENTS__TERM_TYPE_MAX_STACK: case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: case PARSE_EVENTS__TERM_TYPE_OVERWRITE: case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: return config_term_common(attr, term, parse_state); case PARSE_EVENTS__TERM_TYPE_USER: case PARSE_EVENTS__TERM_TYPE_CONFIG: case PARSE_EVENTS__TERM_TYPE_CONFIG1: case PARSE_EVENTS__TERM_TYPE_CONFIG2: case PARSE_EVENTS__TERM_TYPE_CONFIG3: case PARSE_EVENTS__TERM_TYPE_NAME: case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: case PARSE_EVENTS__TERM_TYPE_TIME: case PARSE_EVENTS__TERM_TYPE_DRV_CFG: case PARSE_EVENTS__TERM_TYPE_PERCORE: case PARSE_EVENTS__TERM_TYPE_METRIC_ID: case PARSE_EVENTS__TERM_TYPE_RAW: case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: case PARSE_EVENTS__TERM_TYPE_HARDWARE: case PARSE_EVENTS__TERM_TYPE_CPU: default:
parse_events_error__handle(parse_state->error, term->err_term,
strdup(parse_events__term_type_str(term->type_term)),
strdup("valid terms: call-graph,stack-size\n")
); return -EINVAL;
}
list_for_each_entry(term, &head_config->terms, list) { switch (term->type_term) { case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_TIME:
ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); break; case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); break; case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_INHERIT:
ADD_CONFIG_TERM_VAL(INHERIT, inherit,
term->val.num ? 1 : 0, term->weak); break; case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
ADD_CONFIG_TERM_VAL(INHERIT, inherit,
term->val.num ? 0 : 1, term->weak); break; case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
term->val.num ? 1 : 0, term->weak); break; case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
term->val.num ? 0 : 1, term->weak); break; case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); break; case PARSE_EVENTS__TERM_TYPE_PERCORE:
ADD_CONFIG_TERM_VAL(PERCORE, percore,
term->val.num ? true : false, term->weak); break; case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
term->val.num ? 1 : 0, term->weak); break; case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak); break; case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak); break; case PARSE_EVENTS__TERM_TYPE_USER: case PARSE_EVENTS__TERM_TYPE_CONFIG: case PARSE_EVENTS__TERM_TYPE_CONFIG1: case PARSE_EVENTS__TERM_TYPE_CONFIG2: case PARSE_EVENTS__TERM_TYPE_CONFIG3: case PARSE_EVENTS__TERM_TYPE_NAME: case PARSE_EVENTS__TERM_TYPE_METRIC_ID: case PARSE_EVENTS__TERM_TYPE_RAW: case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: case PARSE_EVENTS__TERM_TYPE_HARDWARE: case PARSE_EVENTS__TERM_TYPE_CPU: default: break;
}
} return 0;
}
/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			/* Only format terms that land in attr->config count. */
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			/* An explicit config= overrides every bit. */
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

	return 0;
}
/* Wildcards on numeric values are only supported by core PMUs. */ if (wildcard && perf_pmus__supports_extended_type()) { struct evsel *first_wildcard_match = NULL; while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { int ret;
found_supported = true; if (parse_events__filter_pmu(parse_state, pmu)) continue;
ret = __parse_events_add_numeric(parse_state, list, pmu,
type, pmu->type,
config, head_config,
first_wildcard_match); if (ret) return ret; if (first_wildcard_match == NULL)
first_wildcard_match =
container_of(list->prev, struct evsel, core.node);
} if (found_supported) return 0;
} return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
type, /*extended_type=*/0, config, head_config, /*first_wildcard_match=*/NULL);
}
parse_events_terms__init(&parsed_terms); if (const_parsed_terms) { int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
if (ret) return ret;
}
fix_raw(&parsed_terms, pmu);
/* Configure attr/terms with a known PMU, this will set hardcoded terms. */ if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
parse_events_terms__exit(&parsed_terms); return -EINVAL;
}
/* Look for event names in the terms and rewrite into format based terms. */ if (perf_pmu__check_alias(pmu, &parsed_terms,
&info, &alias_rewrote_terms,
&alternate_hw_config, err)) {
parse_events_terms__exit(&parsed_terms); return -EINVAL;
}
/* Configure attr/terms again if an alias was expanded. */ if (alias_rewrote_terms &&
config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
parse_events_terms__exit(&parsed_terms); return -EINVAL;
}
if (get_config_terms(&parsed_terms, &config_terms)) {
parse_events_terms__exit(&parsed_terms); return -ENOMEM;
}
/* * When using default config, record which bits of attr->config were * changed by the user.
*/ if (pmu->perf_event_attr_init_default &&
get_config_chgs(pmu, &parsed_terms, &config_terms)) {
parse_events_terms__exit(&parsed_terms); return -ENOMEM;
}
/* Skip configuring hard coded terms that were applied by config_attr. */ if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
parse_state->error)) {
free_config_terms(&config_terms);
parse_events_terms__exit(&parsed_terms); return -EINVAL;
}
*listp = malloc(sizeof(**listp)); if (!*listp) return -ENOMEM;
INIT_LIST_HEAD(*listp);
/* Attempt to add to list assuming event_or_pmu is a PMU name. */
pmu = perf_pmus__find(event_or_pmu); if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
first_wildcard_match, /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) return 0;
if (parse_state->fake_pmu) { if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
const_parsed_terms,
first_wildcard_match, /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) return 0;
}
pmu = NULL; /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {
if (parse_events__filter_pmu(parse_state, pmu)) continue;
if (!parse_events_add_pmu(parse_state, *listp, pmu,
const_parsed_terms,
first_wildcard_match, /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
ok++;
parse_state->wild_card_pmus = true;
} if (first_wildcard_match == NULL) {
first_wildcard_match =
container_of((*listp)->prev, struct evsel, core.node);
}
} if (ok) return 0;
/* Failure to add, assume event_or_pmu is an event name. */
zfree(listp); if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
const_parsed_terms, listp, loc)) return 0;
if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
help = NULL;
parse_events_error__handle(parse_state->error, loc->first_column,
strdup("Bad event or PMU"),
help);
zfree(listp); return -EINVAL;
}
if (!group && mod.weak) {
parse_events_error__handle(parse_state->error, loc->first_column,
strdup("Weak modifier is for use with groups"), NULL); return -EINVAL;
}
__evlist__for_each_entry(list, evsel) { /* Translate modifiers into the equivalent evsel excludes. */ int eu = group ? evsel->core.attr.exclude_user : 0; int ek = group ? evsel->core.attr.exclude_kernel : 0; int eh = group ? evsel->core.attr.exclude_hv : 0; int eH = group ? evsel->core.attr.exclude_host : 0; int eG = group ? evsel->core.attr.exclude_guest : 0; int exclude = eu | ek | eh; int exclude_GH = eG | eH;
if (mod.user) { if (!exclude)
exclude = eu = ek = eh = 1;
eu = 0;
} if (mod.kernel) { if (!exclude)
exclude = eu = ek = eh = 1;
ek = 0;
} if (mod.hypervisor) { if (!exclude)
exclude = eu = ek = eh = 1;
eh = 0;
} if (mod.guest) { if (!exclude_GH)
exclude_GH = eG = eH = 1;
eG = 0;
} if (mod.host) { if (!exclude_GH)
exclude_GH = eG = eH = 1;
eH = 0;
} if (!exclude_GH && exclude_GH_default) { if (perf_host)
eG = 1; elseif (perf_guest)
eH = 1;
}
/* Simple modifiers copied to the evsel. */ if (mod.precise) {
u8 precise = evsel->core.attr.precise_ip + mod.precise; /* * precise ip: * * 0 - SAMPLE_IP can have arbitrary skid * 1 - SAMPLE_IP must have constant skid * 2 - SAMPLE_IP requested to have 0 skid * 3 - SAMPLE_IP must have 0 skid * * See also PERF_RECORD_MISC_EXACT_IP
*/ if (precise > 3) { char *help;
if (asprintf(&help, "Maximum combined precise value is 3, adding precision to \"%s\"",
evsel__name(evsel)) > 0) {
parse_events_error__handle(parse_state->error,
loc->first_column,
help, NULL);
} return -EINVAL;
}
evsel->core.attr.precise_ip = precise;
} if (mod.precise_max)
evsel->precise_max = 1; if (mod.non_idle)
evsel->core.attr.exclude_idle = 1; if (mod.sample_read)
evsel->sample_read = 1; if (mod.pinned && evsel__is_group_leader(evsel))
evsel->core.attr.pinned = 1; if (mod.exclusive && evsel__is_group_leader(evsel))
evsel->core.attr.exclusive = 1; if (mod.weak)
evsel->weak_group = true; if (mod.bpf)
evsel->bpf_counter = true; if (mod.retire_lat)
evsel->retire_lat = true;
} return 0;
}
if (!pmu) { /* * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU * is a core PMU, but in heterogeneous systems this is * unknown. For now pick the first core PMU.
*/
pmu = perf_pmus__scan_core(NULL);
} if (!pmu) {
pr_debug("No PMU found for '%s'\n", evsel__name(evsel)); return -EINVAL;
}
group_pmu_name = pmu->name; /* * Software events may be in a group with other uncore PMU events. Use * the pmu_name of the first non-software event to avoid breaking the * software event out of the group. * * Aux event leaders, like intel_pt, expect a group with events from * other PMUs, so substitute the AUX event's PMU in this case.
*/ if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) { struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
if (!leader_pmu) { /* As with determining pmu above. */
leader_pmu = perf_pmus__scan_core(NULL);
} /* * Starting with the leader, find the first event with a named * non-software PMU. for_each_group_(member|evsel) isn't used as * the list isn't yet sorted putting evsel's in the same group * together.
*/ if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
group_pmu_name = leader_pmu->name;
} elseif (leader->core.nr_members > 1) {
list_for_each_entry(pos, head, core.node) { struct perf_pmu *pos_pmu;
if (pos == leader || evsel__leader(pos) != leader) continue;
pos_pmu = evsel__find_pmu(pos); if (!pos_pmu) { /* As with determining pmu above. */
pos_pmu = perf_pmus__scan_core(NULL);
} if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
group_pmu_name = pos_pmu->name; break;
}
}
}
} /* Record computed name. */
evsel->group_pmu_name = strdup(group_pmu_name); return evsel->group_pmu_name ? 0 : -ENOMEM;
}
__weak int arch_evlist__cmp(conststruct evsel *lhs, conststruct evsel *rhs)
{ /* Order by insertion index. */ return lhs->core.idx - rhs->core.idx;
}
/* * Get the indexes of the 2 events to sort. If the events are * in groups then the leader's index is used otherwise the * event's index is used. An index may be forced for events that * must be in the same group, namely Intel topdown events.
*/ if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
lhs_sort_idx = *force_grouped_idx;
} else { bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
/* If the indices differ then respect the insertion order. */ if (lhs_sort_idx != rhs_sort_idx) return lhs_sort_idx - rhs_sort_idx;
/* * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should * be in the same group. Events in the same group need to be ordered by * their grouping PMU name as the group will be broken to ensure only * events on the same PMU are programmed together. * * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both * events are being forced to be at force_group_index. If only one event * is being forced then the other event is the group leader of the group * we're trying to force the event into. Ensure for the force grouped * case that the PMU name ordering is also respected.
*/
lhs_pmu_name = lhs->group_pmu_name;
rhs_pmu_name = rhs->group_pmu_name;
ret = strcmp(lhs_pmu_name, rhs_pmu_name); if (ret) return ret;
/* * Architecture specific sorting, by default sort events in the same * group with the same PMU by their insertion index. On Intel topdown * constraints must be adhered to - slots first, etc.
*/ return arch_evlist__cmp(lhs, rhs);
}
/*
 * Weak default: architectures that need companion events added to the list
 * (per the caller's comment, x86 topdown metrics require a slots event)
 * override this; the default adds nothing and reports success.
 */
int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
{ return 0;
}
/* On x86 topdown metrics events require a slots event. */
ret = arch_evlist__add_required_events(list); if (ret) return ret;
/* * Compute index to insert ungrouped events at. Place them where the * first ungrouped event appears.
*/
list_for_each_entry(pos, list, core.node) { conststruct evsel *pos_leader = evsel__leader(pos);
ret = evsel__compute_group_pmu_name(pos, list); if (ret) return ret;
if (pos == pos_leader)
orig_num_leaders++;
/* * Ensure indexes are sequential, in particular for multiple * event lists being merged. The indexes are used to detect when * the user order is modified.
*/
pos->core.idx = idx++;
/* * Remember an index to sort all forced grouped events * together to. Use the group leader as some events * must appear first within the group.
*/ if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
force_grouped_idx = pos_leader->core.idx;
}
/* * Recompute groups, splitting for PMUs and adding groups for events * that require them.
*/
idx = 0;
list_for_each_entry(pos, list, core.node) { conststruct evsel *pos_leader = evsel__leader(pos); constchar *pos_pmu_name = pos->group_pmu_name; constchar *cur_leader_pmu_name; bool pos_force_grouped = force_grouped_idx != -1 &&
arch_evsel__must_be_in_group(pos);
/* Reset index and nr_members. */ if (pos->core.idx != idx)
idx_changed = true;
pos->core.idx = idx++;
pos->core.nr_members = 0;
/* * Set the group leader respecting the given groupings and that * groups can't span PMUs.
*/ if (!cur_leader) {
cur_leader = pos;
cur_leaders_grp = &pos->core; if (pos_force_grouped)
force_grouped_leader = pos;
}
cur_leader_pmu_name = cur_leader->group_pmu_name; if (strcmp(cur_leader_pmu_name, pos_pmu_name)) { /* PMU changed so the group/leader must change. */
cur_leader = pos;
cur_leaders_grp = pos->core.leader; if (pos_force_grouped && force_grouped_leader == NULL)
force_grouped_leader = pos;
} elseif (cur_leaders_grp != pos->core.leader) { bool split_even_if_last_leader_was_forced = true;
/* * Event is for a different group. If the last event was * the forced group leader then subsequent group events * and forced events should be in the same group. If * there are no other forced group events then the * forced group leader wasn't really being forced into a * group, it just set arch_evsel__must_be_in_group, and * we don't want the group to split here.
*/ if (force_grouped_idx != -1 && last_event_was_forced_leader) { struct evsel *pos2 = pos; /* * Search the whole list as the group leaders * aren't currently valid.
*/
list_for_each_entry_continue(pos2, list, core.node) { if (pos->core.leader == pos2->core.leader &&
arch_evsel__must_be_in_group(pos2)) {
split_even_if_last_leader_was_forced = false; break;
}
}
} if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) { if (pos_force_grouped) { if (force_grouped_leader) {
cur_leader = force_grouped_leader;
cur_leaders_grp = force_grouped_leader->core.leader;
} else {
cur_leader = force_grouped_leader = pos;
cur_leaders_grp = &pos->core;
}
} else {
cur_leader = pos;
cur_leaders_grp = pos->core.leader;
}
}
} if (pos_leader != cur_leader) { /* The leader changed so update it. */
evsel__set_leader(pos, cur_leader);
}
last_event_was_forced_leader = (force_grouped_leader == pos);
}
list_for_each_entry(pos, list, core.node) { struct evsel *pos_leader = evsel__leader(pos);
/*
 * NOTE(review): the following is a scraped-website disclaimer that leaked
 * into this file; kept only as a comment (translated from German) so the
 * file remains valid C:
 * "The information on this website has been compiled carefully to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */