bool tool_pmu__skip_event(const char *name __maybe_unused)
{
#if !defined(__aarch64__)
	/* The slots event should only appear on arm64. */
	if (strcasecmp(name, "slots") == 0)
		return true;
#endif
#if !defined(__i386__) && !defined(__x86_64__)
	/* The system_tsc_freq event should only appear on x86. */
	if (strcasecmp(name, "system_tsc_freq") == 0)
		return true;
#endif
	return false;
}
int tool_pmu__num_skip_events(void)
{
	int num = 0;

#if !defined(__aarch64__)
	num++;
#endif
#if !defined(__i386__) && !defined(__x86_64__)
	num++;
#endif
	return num;
}
static bool read_until_char(struct io *io, char e)
{
	int c;

	do {
		c = io__get_char(io);
		if (c == -1)
			return false;
	} while (c != e);
	return true;
}
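
/*
 * Parse a field from a /proc/stat "cpuN ..." line. Per proc(5) the
 * space-separated fields after the "cpuN" token are, in order: user,
 * nice, system, idle, iowait, irq, softirq, ... (all in clock ticks),
 * so field 1 is user time and field 3 is system time.
 */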
static int read_stat_field(int fd, struct perf_cpu cpu, int field, __u64 *val)
{
	char buf[256];
	struct io io;
	int i;

	io__init(&io, fd, buf, sizeof(buf));

	/* Skip lines to relevant CPU. */
	for (i = -1; i < cpu.cpu; i++) {
		if (!read_until_char(&io, '\n'))
			return -EINVAL;
	}
	/* Skip to "cpu". */
	if (io__get_char(&io) != 'c')
		return -EINVAL;
	if (io__get_char(&io) != 'p')
		return -EINVAL;
	if (io__get_char(&io) != 'u')
		return -EINVAL;

	/* Skip N of cpuN. */
	if (!read_until_char(&io, ' '))
		return -EINVAL;

	i = 1;
	while (true) {
		if (io__get_dec(&io, val) != ' ')
			break;
		if (field == i)
			return 0;
		i++;
	}
	return -EINVAL;
}
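
/*
 * Parse a numeric field from /proc/<pid>/stat. Per proc(5) field 1 is
 * the pid, field 2 the comm (in parentheses, possibly containing
 * spaces), field 3 the state character, and the remaining fields are
 * numeric; field 14 is utime and field 15 is stime, both in clock
 * ticks.
 */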
static int read_pid_stat_field(int fd, int field, __u64 *val)
{
	char buf[256];
	struct io io;
	int c, i;

	io__init(&io, fd, buf, sizeof(buf));
	if (io__get_dec(&io, val) != ' ')
		return -EINVAL;
	if (field == 1)
		return 0;

	/* Skip comm. */
	if (io__get_char(&io) != '(' || !read_until_char(&io, ')'))
		return -EINVAL;
	if (field == 2)
		return -EINVAL; /* String can't be returned. */

	/* Skip state. */
	if (io__get_char(&io) != ' ' || io__get_char(&io) == -1)
		return -EINVAL;
	if (field == 3)
		return -EINVAL; /* String can't be returned. */

	/* Loop over numeric fields. */
	if (io__get_char(&io) != ' ')
		return -EINVAL;

	i = 4;
	while (true) {
		c = io__get_dec(&io, val);
		if (c == -1)
			return -EINVAL;
		if (c == -2) {
			/* Assume a negative value was read. */
			c = io__get_dec(&io, val);
			*val *= -1;
		}
		if (c != ' ')
			return -EINVAL;
		if (field == i)
			return 0;
		i++;
	}
	return -EINVAL;
}
int evsel__tool_pmu_prepare_open(struct evsel *evsel,
				 struct perf_cpu_map *cpus,
				 int nthreads)
{
	if ((evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME ||
	     evsel__tool_event(evsel) == TOOL_PMU__EVENT_USER_TIME) &&
	    !evsel->start_times) {
		evsel->start_times = xyarray__new(perf_cpu_map__nr(cpus),
						  nthreads,
						  sizeof(__u64));
		if (!evsel->start_times)
			return -ENOMEM;
	}
	return 0;
}
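
/*
 * user_time and system_time are computed by sampling /proc at open and
 * at read: start_times caches the per-CPU, per-thread baseline taken
 * when the evsel is opened, and evsel__tool_pmu_read() reports the
 * delta against it.
 */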
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int evsel__tool_pmu_open(struct evsel *evsel,
			 struct perf_thread_map *threads,
			 int start_cpu_map_idx, int end_cpu_map_idx)
{
	enum tool_pmu_event ev = evsel__tool_event(evsel);
	int pid = -1, idx = 0, thread = 0, nthreads, err = 0, old_errno;

	if (ev == TOOL_PMU__EVENT_NUM_CPUS)
		return 0;

	if (ev == TOOL_PMU__EVENT_DURATION_TIME) {
		if (evsel->core.attr.sample_period) /* no sampling */
			return -EINVAL;
		evsel->start_time = rdclock();
		return 0;
	}

	if (evsel->cgrp)
		pid = evsel->cgrp->fd;

	nthreads = perf_thread_map__nr(threads);
	for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (!evsel->cgrp && !evsel->core.system_wide)
				pid = perf_thread_map__pid(threads, thread);

			if (ev == TOOL_PMU__EVENT_USER_TIME ||
			    ev == TOOL_PMU__EVENT_SYSTEM_TIME) {
				bool system = ev == TOOL_PMU__EVENT_SYSTEM_TIME;
				__u64 *start_time = NULL;
				int fd;

				if (evsel->core.attr.sample_period) {
					/* no sampling */
					err = -EINVAL;
					goto out_close;
				}
				if (pid > -1) {
					char buf[64];

					snprintf(buf, sizeof(buf), "/proc/%d/stat", pid);
					fd = open(buf, O_RDONLY);
					evsel->pid_stat = true;
				} else {
					fd = open("/proc/stat", O_RDONLY);
				}
				FD(evsel, idx, thread) = fd;
				if (fd < 0) {
					err = -errno;
					goto out_close;
				}
				/* Record the baseline time, mirroring the read side. */
				start_time = xyarray__entry(evsel->start_times, idx, thread);
				if (evsel->pid_stat) {
					/* The event exists solely on 1 CPU. */
					if (idx == 0)
						err = read_pid_stat_field(fd, system ? 15 : 14,
									  start_time);
					else
						*start_time = 0;
				} else {
					/* The event is for all threads. */
					if (thread == 0) {
						struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus,
											idx);

						err = read_stat_field(fd, cpu, system ? 3 : 1,
								      start_time);
					} else {
						*start_time = 0;
					}
				}
				if (err)
					goto out_close;
			}
		}
	}
	return 0;
out_close:
	if (err)
		threads->err_thread = thread;

	old_errno = errno;
	do {
		while (--thread >= 0) {
			if (FD(evsel, idx, thread) >= 0)
				close(FD(evsel, idx, thread));
			FD(evsel, idx, thread) = -1;
		}
		thread = nthreads;
	} while (--idx >= 0);
	errno = old_errno;
	return err;
}
bool tool_pmu__read_event(enum tool_pmu_event ev, struct evsel *evsel, u64 *result)
{
	const struct cpu_topology *topology;

	switch (ev) {
	case TOOL_PMU__EVENT_HAS_PMEM:
		*result = has_pmem() ? 1 : 0;
		return true;

	case TOOL_PMU__EVENT_NUM_CORES:
		topology = online_topology();
		*result = topology->core_cpus_lists;
		return true;

	case TOOL_PMU__EVENT_NUM_CPUS:
		if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus)) {
			/* No evsel to be specific to. */
			*result = cpu__max_present_cpu().cpu;
		} else if (!perf_cpu_map__has_any_cpu(evsel->core.cpus)) {
			/* Evsel just has specific CPUs. */
			*result = perf_cpu_map__nr(evsel->core.cpus);
		} else {
			/*
			 * "Any CPU" event that can be scheduled on any CPU in
			 * the PMU's cpumask. The PMU cpumask should be saved in
			 * pmu_cpus. If not present fall back to max.
			 */
			if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))
				*result = perf_cpu_map__nr(evsel->core.pmu_cpus);
			else
				*result = cpu__max_present_cpu().cpu;
		}
		return true;

	case TOOL_PMU__EVENT_NUM_CPUS_ONLINE: {
		struct perf_cpu_map *online = cpu_map__online();

		if (!online)
			return false;

		if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus)) {
			/* No evsel to be specific to. */
			*result = perf_cpu_map__nr(online);
		} else if (!perf_cpu_map__has_any_cpu(evsel->core.cpus)) {
			/* Evsel just has specific CPUs. */
			struct perf_cpu_map *tmp =
				perf_cpu_map__intersect(online, evsel->core.cpus);

			*result = perf_cpu_map__nr(tmp);
			perf_cpu_map__put(tmp);
		} else {
			/*
			 * "Any CPU" event that can be scheduled on any CPU in
			 * the PMU's cpumask. The PMU cpumask should be saved in
			 * pmu_cpus, if not present then just the online cpu
			 * mask.
			 */
			if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus)) {
				struct perf_cpu_map *tmp =
					perf_cpu_map__intersect(online, evsel->core.pmu_cpus);

				*result = perf_cpu_map__nr(tmp);
				perf_cpu_map__put(tmp);
			} else {
				*result = perf_cpu_map__nr(online);
			}
		}
		perf_cpu_map__put(online);
		return true;
	}
	case TOOL_PMU__EVENT_NUM_DIES:
		topology = online_topology();
		*result = topology->die_cpus_lists;
		return true;

	case TOOL_PMU__EVENT_NUM_PACKAGES:
		topology = online_topology();
		*result = topology->package_cpus_lists;
		return true;

	case TOOL_PMU__EVENT_SLOTS: {
		double slots = perf_pmu__cpu_slots_per_cycle();

		if (isnan(slots))
			return false;
		*result = slots;
		return true;
	}
	case TOOL_PMU__EVENT_SMT_ON:
		*result = smt_on() ? 1 : 0;
		return true;

	case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
		*result = arch_get_tsc_freq();
		return true;

	case TOOL_PMU__EVENT_NONE:
	case TOOL_PMU__EVENT_DURATION_TIME:
	case TOOL_PMU__EVENT_USER_TIME:
	case TOOL_PMU__EVENT_SYSTEM_TIME:
	case TOOL_PMU__EVENT_MAX:
	default:
		return false;
	}
}
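
/*
 * tool_pmu__read_event() covers the events whose value can be computed
 * on the spot (topology counts, SMT state, TSC frequency and the
 * like); the time-based events (duration_time, user_time, system_time)
 * are instead accumulated in evsel__tool_pmu_read() below.
 */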
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
{
	__u64 *start_time, cur_time, delta_start;
	u64 val;
	int fd, err = 0;
	struct perf_counts_values *count, *old_count = NULL;
	bool adjust = false;
	enum tool_pmu_event ev = evsel__tool_event(evsel);

	count = perf_counts(evsel->counts, cpu_map_idx, thread);

	switch (ev) {
	case TOOL_PMU__EVENT_HAS_PMEM:
	case TOOL_PMU__EVENT_NUM_CORES:
	case TOOL_PMU__EVENT_NUM_CPUS:
	case TOOL_PMU__EVENT_NUM_CPUS_ONLINE:
	case TOOL_PMU__EVENT_NUM_DIES:
	case TOOL_PMU__EVENT_NUM_PACKAGES:
	case TOOL_PMU__EVENT_SLOTS:
	case TOOL_PMU__EVENT_SMT_ON:
	case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
		if (evsel->prev_raw_counts)
			old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
		val = 0;
		if (cpu_map_idx == 0 && thread == 0) {
			if (!tool_pmu__read_event(ev, evsel, &val)) {
				count->lost++;
				val = 0;
			}
		}
		if (old_count) {
			count->val = old_count->val + val;
			count->run = old_count->run + 1;
			count->ena = old_count->ena + 1;
		} else {
			count->val = val;
			count->run++;
			count->ena++;
		}
		return 0;
	case TOOL_PMU__EVENT_DURATION_TIME:
		/*
		 * Pretend duration_time is only on the first CPU and thread, or
		 * else aggregation will scale duration_time by the number of
		 * CPUs/threads.
		 */
		start_time = &evsel->start_time;
		if (cpu_map_idx == 0 && thread == 0)
			cur_time = rdclock();
		else
			cur_time = *start_time;
		break;
	case TOOL_PMU__EVENT_USER_TIME:
	case TOOL_PMU__EVENT_SYSTEM_TIME: {
		bool system = evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME;

		start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
		fd = FD(evsel, cpu_map_idx, thread);
		lseek(fd, 0, SEEK_SET);
		if (evsel->pid_stat) {
			/* The event exists solely on 1 CPU. */
			if (cpu_map_idx == 0)
				err = read_pid_stat_field(fd, system ? 15 : 14, &cur_time);
			else
				cur_time = 0;
		} else {
			/* The event is for all threads. */
			if (thread == 0) {
				struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus,
									cpu_map_idx);

				err = read_stat_field(fd, cpu, system ? 3 : 1, &cur_time);
			} else {
				cur_time = 0;
			}
		}
		adjust = true;
		break;
	}
	case TOOL_PMU__EVENT_NONE:
	case TOOL_PMU__EVENT_MAX:
	default:
		err = -EINVAL;
	}
	if (err)
		return err;
	delta_start = cur_time - *start_time;
	if (adjust) {
		/* /proc times are in clock ticks; convert to nanoseconds. */
		__u64 ticks_per_sec = sysconf(_SC_CLK_TCK);

		delta_start *= 1000000000 / ticks_per_sec;
	}
	count->val = delta_start;
	count->lost = 0;
	/*
	 * The values of enabled and running must make a ratio of 100%. The
	 * exact values don't matter as long as they are non-zero to avoid
	 * issues with evsel__count_has_error.
	 */
	count->ena++;
	count->run++;
	return 0;
}