if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries", "%d", &topa_multiple_entries) != 1)
topa_multiple_entries = 0;
/* * Use caps/topa_multiple_entries to indicate early hardware that had * extra frequent PSBs.
*/ if (!topa_multiple_entries) {
psb_period = 256; goto out;
}
err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val); if (err)
val = 0;
staticbool intel_pt_exclude_guest(void)
{ int pt_mode;
if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
pt_mode = 0;
return pt_mode == 1;
}
/*
 * Render the set bits of @valid into @str as a comma-separated list of
 * numbers and ranges, e.g. bits {0,2,3,5,6,7} -> "0,2,3,5-7".
 *
 * Formatting rules (matching the original state machine):
 *  - a run of one set bit prints as "n"
 *  - a run of two prints as "a,b"
 *  - a run of three or more prints as "a-b"
 *
 * @str: output buffer, always NUL-terminated (scnprintf-bounded)
 * @len: size of @str
 * @valid: bitmask of valid values (bits 0..63)
 */
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int bit, start = 0;
	bool in_run = false, first = true;
	int pos = 0;

	str[0] = '\0';

	/*
	 * Iterate one position past bit 63 so a run ending at the top
	 * bit is still flushed; by then @valid has shifted to zero.
	 */
	for (bit = 0; bit <= 64; bit++, valid >>= 1) {
		if (valid & 1) {
			if (!in_run) {
				/* Run starts: print it, with a separator if not first */
				in_run = true;
				start = bit;
				pos += scnprintf(str + pos, len - pos, "%s%u",
						 first ? "" : ",", bit);
				first = false;
			}
		} else if (in_run) {
			unsigned int end = bit - 1;

			/* Run ends: decide between single, pair and range form */
			if (end == start + 1)
				pos += scnprintf(str + pos, len - pos, ",%u", end);
			else if (end > start + 1)
				pos += scnprintf(str + pos, len - pos, "-%u", end);
			in_run = false;
		}
	}
}
dirfd = perf_pmu__event_source_devices_fd(); if (dirfd < 0) return dirfd;
/* * If supported, force pass-through config term (pt=1) even if user * sets pt=0, which avoids senseless kernel errors.
*/ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
!(evsel->core.attr.config & 1)) {
pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
evsel->core.attr.config |= 1;
}
if (!sz) continue; if (min_sz && (sz < *min_sz || !*min_sz))
*min_sz = sz; if (max_sz && sz > *max_sz)
*max_sz = sz;
}
}
/* * Currently, there is not enough information to disambiguate different PEBS * events, so only allow one.
*/ staticbool intel_pt_too_many_aux_output(struct evlist *evlist)
{ struct evsel *evsel; int aux_output_cnt = 0;
sz = round_up(sz, page_size) / page_size;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
} if (opts->auxtrace_snapshot_size >
opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
opts->auxtrace_snapshot_size,
opts->auxtrace_mmap_pages * (size_t)page_size); return -EINVAL;
} if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n"); return -EINVAL;
}
pr_debug2("Intel PT snapshot size: %zu\n",
opts->auxtrace_snapshot_size); if (psb_period &&
opts->auxtrace_snapshot_size <= psb_period +
INTEL_PT_PSB_PERIOD_NEAR)
ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
opts->auxtrace_snapshot_size, psb_period);
}
/* Set default sizes for sample mode */ if (opts->auxtrace_sample_mode) {
size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
size_t min_sz = 0, max_sz = 0;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
} if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
max_sz,
opts->auxtrace_mmap_pages * (size_t)page_size); return -EINVAL;
}
pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
min_sz, max_sz); if (psb_period &&
min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
min_sz, psb_period);
}
/* Set default sizes for full trace mode */ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) { if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size; if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
}
if (sz < min_sz || !is_power_of_2(sz)) {
pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
min_sz / 1024); return -EINVAL;
}
}
if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
if (intel_pt_evsel) { /* * To obtain the auxtrace buffer file descriptor, the auxtrace * event must come first.
*/
evlist__to_front(evlist, intel_pt_evsel); /* * In the case of per-cpu mmaps, we need the CPU on the * AUX event.
*/ if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(intel_pt_evsel, CPU);
}
/* Add dummy event to keep tracking */ if (opts->full_auxtrace) { bool need_system_wide_tracking; struct evsel *tracking_evsel;
/* * User space tasks can migrate between CPUs, so when tracing * selected CPUs, sideband for all CPUs is still needed.
*/
need_system_wide_tracking = opts->target.cpu_list &&
!intel_pt_evsel->core.attr.exclude_user;
tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking); if (!tracking_evsel) return -ENOMEM;
if (need_immediate)
tracking_evsel->immediate = true;
/* In per-cpu case, always need the time of mmap events etc */ if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME); /* And the CPU for switch events */
evsel__set_sample_bit(tracking_evsel, CPU);
}
evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
}
/* * Warn the user when we do not have enough information to decode i.e. * per-cpu with no sched_switch (except workload-only).
*/ if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
!target__none(&opts->target) &&
!intel_pt_evsel->core.attr.exclude_user)
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous. It is assumed that @compare_size <=
 * @buf2_size. This function returns %false if the bytes are identical, %true
 * otherwise.
*/ staticbool intel_pt_compare_buffers(void *buf1, size_t compare_size, void *buf2, size_t offs2, size_t buf2_size)
{
size_t end2 = offs2 + compare_size, part_size;
if (end2 <= buf2_size) return memcmp(buf1, buf2 + offs2, compare_size);
pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
__func__, idx, (size_t)*old, (size_t)*head);
err = intel_pt_snapshot_init(ptr, mm->len); if (err) goto out_err;
if (idx >= ptr->snapshot_ref_cnt) {
err = intel_pt_alloc_snapshot_refs(ptr, idx); if (err) goto out_err;
}
if (ptr->snapshot_ref_buf_size) { if (!ptr->snapshot_refs[idx].ref_buf) {
err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len); if (err) goto out_err;
}
wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
} else {
wrapped = ptr->snapshot_refs[idx].wrapped; if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
ptr->snapshot_refs[idx].wrapped = true;
wrapped = true;
}
}
/* * In full trace mode 'head' continually increases. However in snapshot * mode 'head' is an offset within the buffer. Here 'old' and 'head' * are adjusted to match the full trace case which expects that 'old' is * always less than 'head'.
*/ if (wrapped) {
*old = *head;
*head += mm->len;
} else { if (mm->mask)
*old &= mm->mask; else
*old %= mm->len; if (*old > *head)
*head += mm->len;
}
pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
__func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
/*
 * NOTE(review): The following German website-disclaimer boilerplate was
 * accidentally appended to this source file; it is not part of the code.
 * English translation: "The information on this website has been compiled
 * carefully and to the best of our knowledge. However, no guarantee is made
 * of the completeness, correctness, or quality of the information provided.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */