/* * bit 4: TLB access * 1 = stored missed 2nd level TLB * * so it either hit the walker or the OS * otherwise hit 2nd level TLB
*/ if (dse.st_stlb_miss)
val |= P(TLB, MISS); else
val |= P(TLB, HIT);
/* * bit 0: hit L1 data cache * if not set, then all we know is that * it missed L1D
*/ if (dse.st_l1d_hit)
val |= P(LVL, HIT); else
val |= P(LVL, MISS);
/* * bit 5: Locked prefix
*/ if (dse.st_locked)
val |= P(LOCK, LOCKED);
/* * Ice Lake and earlier models do not support block infos.
*/ if (!x86_pmu.pebs_block) {
val |= P(BLK, NA); return val;
} /* * bit 6: load was blocked since its data could not be forwarded * from a preceding store
*/ if (dse.ld_data_blk)
val |= P(BLK, DATA);
/* * bit 7: load was blocked due to potential address conflict with * a preceding store
*/ if (dse.ld_addr_blk)
val |= P(BLK, ADDR);
if (!dse.ld_data_blk && !dse.ld_addr_blk)
val |= P(BLK, NA);
return val;
}
static u64 store_latency_data(struct perf_event *event, u64 status)
{ union intel_x86_pebs_dse dse; union perf_mem_data_src src;
u64 val;
dse.val = status;
/* * use the mapping table for bit 0-3
*/
val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse];
preempt_disable(); for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
cea_set_pte(cea, pa, prot);
/* * This is a cross-CPU update of the cpu_entry_area, we must shoot down * all TLB entries for it.
*/
flush_tlb_kernel_range(start, start + size);
preempt_enable();
}
for_each_possible_cpu(cpu) { /* * Again, ignore errors from offline CPUs, they will no longer * observe cpu_hw_events.ds and not program the DS_AREA when * they come up.
*/
fini_debug_store_on_cpu(cpu);
}
for_each_possible_cpu(cpu) { if (x86_pmu.ds_pebs)
release_pebs_buffer(cpu);
release_bts_buffer(cpu);
}
}
void reserve_ds_buffers(void)
{ int bts_err = 0, pebs_err = 0; int cpu;
if (!bts_err && alloc_bts_buffer(cpu))
bts_err = 1;
if (x86_pmu.ds_pebs && !pebs_err &&
alloc_pebs_buffer(cpu))
pebs_err = 1;
if (bts_err && pebs_err) break;
}
if (bts_err) {
for_each_possible_cpu(cpu)
release_bts_buffer(cpu);
}
if (x86_pmu.ds_pebs && pebs_err) {
for_each_possible_cpu(cpu)
release_pebs_buffer(cpu);
}
if (bts_err && pebs_err) {
for_each_possible_cpu(cpu)
release_ds_buffer(cpu);
} else { if (x86_pmu.bts && !bts_err)
x86_pmu.bts_active = 1;
if (x86_pmu.ds_pebs && !pebs_err)
x86_pmu.pebs_active = 1;
for_each_possible_cpu(cpu) { /* * Ignores wrmsr_on_cpu() errors for offline CPUs they * will get this call through intel_pmu_cpu_starting().
*/
init_debug_store_on_cpu(cpu);
}
}
}
/* * BTS leaks kernel addresses in branches across the cpl boundary, * such as traps or system calls, so unless the user is asking for * kernel tracing (and right now it's not possible), we'd need to * filter them out. But first we need to count how many of those we * have in the current batch. This is an extra O(n) pass, however, * it's much faster than the other one especially considering that * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the * alloc_bts_buffer()).
*/ for (at = base; at < top; at++) { /* * Note that right now *this* BTS code only works if * attr::exclude_kernel is set, but let's keep this extra * check here in case that changes.
*/ if (event->attr.exclude_kernel &&
(kernel_ip(at->from) || kernel_ip(at->to)))
skip++;
}
/* * Prepare a generic sample, i.e. fill in the invariant fields. * We will overwrite the from and to address before we output * the sample.
*/
rcu_read_lock();
perf_prepare_sample(&data, event, ®s);
perf_prepare_header(&header, &data, event, ®s);
if (perf_output_begin(&handle, &data, event,
header.size * (top - base - skip))) goto unlock;
for (at = base; at < top; at++) { /* Filter out any records that contain kernel addresses. */ if (event->attr.exclude_kernel &&
(kernel_ip(at->from) || kernel_ip(at->to))) continue;
/*
 * PEBS-capable event constraints for Intel Silvermont (slm):
 * one flagged UEVENT entry plus a catch-all.
 */
struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};
/*
 * PEBS-capable event constraints for Intel Goldmont (glm):
 * no per-event restrictions, single catch-all entry.
 */
struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};
/*
 * PEBS-capable event constraints for Intel Gracemont (grt, the Atom core
 * of hybrid parts). Both entries route event 0xd0 umasks through the
 * hybrid latency path — presumably the MEM_UOPS_RETIRED load/store
 * umasks; confirm against the SDM event tables.
 */
struct event_constraint intel_grt_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
	INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
	EVENT_CONSTRAINT_END
};
if (pebs_constraints) {
for_each_event_constraint(c, pebs_constraints) { if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags; return c;
}
}
}
/* * Extended PEBS support * Makes the PEBS code search the normal constraints.
*/ if (x86_pmu.flags & PMU_FL_PEBS_ALL) return NULL;
return &emptyconstraint;
}
/* * We need the sched_task callback even for per-cpu events when we use * the large interrupt threshold, such that we can provide PID and TID * to PEBS samples.
*/ staticinlinebool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{ if (cpuc->n_pebs == cpuc->n_pebs_via_pt) returnfalse;
for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i]; if (!is_pebs_counter_event_group(event)) continue;
__intel_pmu_pebs_update_cfg(event, cpuc->assign[i], &pebs_data_cfg);
}
if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
attr->precise_ip > 1) return pebs_data_cfg;
if (sample_type & PERF_PEBS_MEMINFO_TYPE)
pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
/* * We need GPRs when: * + user requested them * + precise_ip < 2 for the non event IP * + For RTM TSX weight we need GPRs for the abort code.
*/
gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS)) ||
((sample_type & PERF_SAMPLE_REGS_USER) &&
(attr->sample_regs_user & PEBS_GP_REGS));
if (sample_type & PERF_SAMPLE_BRANCH_STACK) { /* * For now always log all LBRs. Could configure this * later.
*/
pebs_data_cfg |= PEBS_DATACFG_LBRS |
((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
}
/* * Make sure we get updated with the first PEBS event. * During removal, ->pebs_data_cfg is still valid for * the last PEBS event. Don't clear it.
*/ if ((cpuc->n_pebs == 1) && add)
cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
if (needed_cb != pebs_needs_sched_cb(cpuc)) { if (!needed_cb)
perf_sched_cb_inc(pmu); else
perf_sched_cb_dec(pmu);
cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}
/* * The PEBS record doesn't shrink on pmu::del(). Doing so would require * iterating all remaining PEBS events to reconstruct the config.
*/ if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;
pebs_data_cfg = pebs_update_adaptive_cfg(event); /* * Be sure to update the thresholds when we change the record.
*/ if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
}
/* * Use auto-reload if possible to save a MSR write in the PMI. * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
*/ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
ds->pebs_event_reset[idx] = 0;
}
if (cpuc->pebs_enabled)
__intel_pmu_pebs_disable_all();
}
staticint intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsignedlong from = cpuc->lbr_entries[0].from; unsignedlong old_to, to = cpuc->lbr_entries[0].to; unsignedlong ip = regs->ip; int is_64bit = 0; void *kaddr; int size;
/* * We don't need to fixup if the PEBS assist is fault like
*/ if (!x86_pmu.intel_cap.pebs_trap) return 1;
/* * No LBR entry, no basic block, no rewinding
*/ if (!cpuc->lbr_stack.nr || !from || !to) return 0;
/* * Basic blocks should never cross user/kernel boundaries
*/ if (kernel_ip(ip) != kernel_ip(to)) return 0;
/* * unsigned math, either ip is before the start (impossible) or * the basic block is larger than 1 page (sanity)
*/ if ((ip - to) > PEBS_FIXUP_SIZE) return 0;
/* * We sampled a branch insn, rewind using the LBR stack
*/ if (ip == to) {
set_linear_ip(regs, from); return 1;
}
size = ip - to; if (!kernel_ip(ip)) { int bytes;
u8 *buf = this_cpu_read(insn_buffer);
/* 'size' must fit our buffer, see above */
bytes = copy_from_user_nmi(buf, (void __user *)to, size); if (bytes != 0) return 0;
/* * Make sure there was not a problem decoding the instruction. * This is doubly important because we have an infinite loop if * insn.length=0.
*/ if (insn_get_length(&insn)) break;
to += insn.length;
kaddr += insn.length;
size -= insn.length;
} while (to < ip);
if (to == ip) {
set_linear_ip(regs, old_to); return 1;
}
/* * Even though we decoded the basic block, the instruction stream * never matched the given IP, either the TO or the IP got corrupted.
*/ return 0;
}
if (fl & PERF_X86_EVENT_PEBS_LDLAT)
val = load_latency_data(event, aux); elseif (fl & PERF_X86_EVENT_PEBS_STLAT)
val = store_latency_data(event, aux); elseif (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID)
val = x86_pmu.pebs_latency_data(event, aux); elseif (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
val = precise_datala_hsw(event, aux); elseif (fst)
val = precise_store_data(aux); return val;
}
staticvoid setup_pebs_time(struct perf_event *event, struct perf_sample_data *data,
u64 tsc)
{ /* Converting to a user-defined clock is not supported yet. */ if (event->attr.use_clockid != 0) return;
/* * Doesn't support the conversion when the TSC is unstable. * The TSC unstable case is a corner case and very unlikely to * happen. If it happens, the TSC in a PEBS record will be * dropped and fall back to perf_event_clock().
*/ if (!using_native_sched_clock() || !sched_clock_stable()) return;
/* * Use latency for weight (only avail with PEBS-LL)
*/ if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) {
data->weight.full = pebs->lat;
data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
}
/* * data.data_src encodes the data source
*/ if (sample_type & PERF_SAMPLE_DATA_SRC) {
data->data_src.val = get_data_src(event, pebs->dse);
data->sample_flags |= PERF_SAMPLE_DATA_SRC;
}
/* * We must however always use iregs for the unwinder to stay sane; the * record BP,SP,IP can point into thin air when the record is from a * previous PMI context or an (I)RET happened between the record and * PMI.
*/
perf_sample_save_callchain(data, event, iregs);
/* * We use the interrupt regs as a base because the PEBS record does not * contain a full regs set, specifically it seems to lack segment * descriptors, which get used by things like user_mode(). * * In the simple case fix up only the IP for PERF_SAMPLE_IP.
*/
*regs = *iregs;
/* * Initialize regs_>flags from PEBS, * Clear exact bit (which uses x86 EFLAGS Reserved bit 3), * i.e., do not rely on it being zero:
*/
regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
if (event->attr.precise_ip > 1) { /* * Haswell and later processors have an 'eventing IP' * (real IP) which fixes the off-by-1 skid in hardware. * Use it when precise_ip >= 2 :
*/ if (x86_pmu.intel_cap.pebs_format >= 2) {
set_linear_ip(regs, pebs->real_ip);
regs->flags |= PERF_EFLAGS_EXACT;
} else { /* Otherwise, use PEBS off-by-1 IP: */
set_linear_ip(regs, pebs->ip);
/* * With precise_ip >= 2, try to fix up the off-by-1 IP * using the LBR. If successful, the fixup function * corrects regs->ip and calls set_linear_ip() on regs:
*/ if (intel_pmu_pebs_fixup_ip(regs))
regs->flags |= PERF_EFLAGS_EXACT;
}
} else { /* * When precise_ip == 1, return the PEBS off-by-1 IP, * no fixup attempted:
*/
set_linear_ip(regs, pebs->ip);
}
if (x86_pmu.intel_cap.pebs_format >= 2) { /* Only set the TSX weight when no memory weight. */ if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) {
data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning);
data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
} if (sample_type & PERF_SAMPLE_TRANSACTION) {
data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
pebs->ax);
data->sample_flags |= PERF_SAMPLE_TRANSACTION;
}
}
/* * v3 supplies an accurate time stamp, so we use that * for the time stamp. * * We can only do this for the default trace clock.
*/ if (x86_pmu.intel_cap.pebs_format >= 3)
setup_pebs_time(event, data, pebs->tsc);
/* * A recorded counter may not have an assigned event in the * following cases. The value should be dropped. * - An event is deleted. There is still an active PEBS event. * The PEBS record doesn't shrink on pmu::del(). * If the counter of the deleted event once occurred in a PEBS * record, PEBS still records the counter until the counter is * reassigned. * - An event is stopped for some reason, e.g., throttled. * During this period, another event is added and takes the * counter of the stopped event. The stopped event is assigned * to another new and uninitialized counter, since the * x86_pmu_start(RELOAD) is not invoked for a stopped event. * The PEBS__DATA_CFG is updated regardless of the event state. * The uninitialized counter can be recorded in a PEBS record. * But the cpuc->events[uninitialized_counter] is always NULL, * because the event is stopped. The uninitialized value is * safely dropped.
*/ if (!event) return;
for_each_set_bit(bit, (unsignedlong *)&cntr->fixed, INTEL_PMC_MAX_FIXED) { /* The slots event will be handled with perf_metric later */ if ((cntr->metrics == INTEL_CNTR_METRICS) &&
(bit + INTEL_PMC_IDX_FIXED == INTEL_PMC_IDX_FIXED_SLOTS)) {
next_record += sizeof(u64); continue;
}
intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED],
*(u64 *)next_record);
next_record += sizeof(u64);
}
/* HW will reload the value right after the overflow. */ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
local64_set(&event->hw.prev_count, (u64)-event->hw.sample_period);
/* * We must however always use iregs for the unwinder to stay sane; the * record BP,SP,IP can point into thin air when the record is from a * previous PMI context or an (I)RET happened between the record and * PMI.
*/
perf_sample_save_callchain(data, event, iregs);
*regs = *iregs; /* The ip in basic is EventingIP */
set_linear_ip(regs, basic->ip);
regs->flags = PERF_EFLAGS_EXACT;
if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)
data->weight.var3_w = basic->retire_latency; else
data->weight.var3_w = 0;
}
/* * The record for MEMINFO is in front of GP * But PERF_SAMPLE_TRANSACTION needs gprs->ax. * Save the pointer here but process later.
*/ if (format_group & PEBS_DATACFG_MEMINFO) {
meminfo = next_record;
next_record = meminfo + 1;
}
if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}
if (format_group & PEBS_DATACFG_MEMINFO) { if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
u64 latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ?
meminfo->cache_latency : meminfo->mem_latency;
if (x86_pmu.flags & PMU_FL_INSTR_LATENCY)
data->weight.var2_w = meminfo->instr_latency;
/* * Although meminfo::latency is defined as a u64, * only the lower 32 bits include the valid data * in practice on Ice Lake and earlier platforms.
*/ if (sample_type & PERF_SAMPLE_WEIGHT) {
data->weight.full = latency ?:
intel_get_tsx_weight(meminfo->tsx_tuning);
} else {
data->weight.var1_dw = (u32)latency ?:
intel_get_tsx_weight(meminfo->tsx_tuning);
}
next_record += sizeof(struct pebs_cntr_header); /* * The PEBS_DATA_CFG is a global register, which is the * superset configuration for all PEBS events. * For the PEBS record of non-sample-read group, ignore * the counter snapshot fields.
*/ if (is_pebs_counter_event_group(event)) {
__setup_pebs_counter_group(cpuc, event, cntr, next_record);
data->sample_flags |= PERF_SAMPLE_READ;
}
nr = hweight32(cntr->cntr) + hweight32(cntr->fixed); if (cntr->metrics == INTEL_CNTR_METRICS)
nr += 2;
next_record += nr * sizeof(u64);
}
/*
 * NOTE(review): trailing non-code text (a German website disclaimer) was
 * scraped into this file. Preserved below as a comment so the file stays
 * valid C; it should almost certainly be removed entirely.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */