// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/perf/riscv_pmu.h>
#include <linux/sched_clock.h>

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;
	userpg->cap_user_rdpmc = riscv_perf_user_access(event);
	/*
	 * The counters are 64-bit but the priv spec doesn't mandate all the
	 * bits to be implemented: that is why the counter width can vary
	 * based on the cpu vendor.
	 */
	if (userpg->cap_user_rdpmc)
		userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
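
	/*
	 * Example: per the perf_event_mmap_page documentation in
	 * perf_event.h, userspace can use the pmc_width published above
	 * to sign-extend a raw value read from a counter narrower than
	 * 64 bits before accumulating it:
	 *
	 *	pmc <<= 64 - width;
	 *	pmc >>= 64 - width;	// signed shift right
	 *	count += pmc;
	 */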

	do {
		rd = sched_clock_read_begin(&seq);

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;
	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer to the
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}
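
	/*
	 * The conversion factor is time_mult / 2^time_shift, so halving
	 * time_mult while dropping time_shift from 32 to 31 keeps the
	 * factor identical apart from the discarded low bit of time_mult,
	 * e.g. 5 / 2^32 becomes 2 / 2^31 = 4 / 2^32.
	 */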
	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}
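
/*
 * Illustration only, not driver code: with the capabilities published
 * above, a userspace reader follows the seqlock pattern documented for
 * struct perf_event_mmap_page in perf_event.h to convert raw cycles to
 * sched_clock nanoseconds. This is also why the cycle base was
 * subtracted from time_zero earlier: readers unaware of
 * cap_user_time_short can still apply the formula directly.
 * read_cycles() is a hypothetical stand-in for the architecture's
 * counter-read instruction:
 *
 *	u32 seq;
 *	u64 cyc, quot, rem, time;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *
 *		cyc = read_cycles();
 *		quot = cyc >> pc->time_shift;
 *		rem = cyc & (((u64)1 << pc->time_shift) - 1);
 *		time = pc->time_zero + quot * pc->time_mult +
 *		       ((rem * pc->time_mult) >> pc->time_shift);
 *
 *		barrier();
 *	} while (pc->lock != seq);
 */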

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == -1)
		/* Handle init case where idx is not initialized yet */
		cwidth = rvpmu->ctr_get_width(0);
	else
		cwidth = rvpmu->ctr_get_width(hwc->idx);

	return GENMASK_ULL(cwidth, 0);
}
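
/*
 * Why the mask matters (illustrative numbers, assuming a 48-bit
 * counter): deltas must be computed modulo the implemented width so a
 * wrap of a narrow counter still yields the right difference:
 *
 *	prev  = 0x0000fffffffffff0;	// near the top of 48 bits
 *	new   = 0x0000000000000010;	// counter wrapped
 *	delta = (new - prev) & GENMASK_ULL(47, 0);	// = 0x20, correct
 */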

void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
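
/*
 * For context, a sketch of the matching start path (an assumption
 * based on the usual perf driver pattern, not necessarily this
 * driver's exact code): starting clears both PERF_HES_* flags,
 * reprograms the period and hands the initial value to the backend,
 * so stop/start pairs keep event->count and period_left consistent:
 *
 *	hwc->state = 0;
 *	riscv_pmu_event_set_period(event);
 *	init_val = local64_read(&hwc->prev_count) & max_period;
 *	rvpmu->ctr_start(event, init_val);
 */
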
int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}
	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);
	perf_event_update_userpage(event);

	return overflow;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}
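
/*
 * Worked example for the clamp in riscv_pmu_event_set_period() above
 * (illustrative numbers, assuming a 48-bit counter): max_period =
 * GENMASK_ULL(47, 0) = 2^48 - 1, so any period larger than 2^47 - 1 is
 * clamped to 2^47 - 1 and the counter later starts at -left truncated
 * to 48 bits, i.e. 2^47 + 1. That leaves nearly half the counter range
 * before the overflow point, so even a long interrupt latency cannot
 * let the running counter lap the value being programmed.
 */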

static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0)
		return mapped_event;

	/*
	 * idx is set to -1 because the index of a general event should not
	 * be decided until binding to some counter in pmu->add().
	 * config will contain the information about the counter CSR;
	 * idx will contain the counter index.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (rvpmu->event_init)
		rvpmu->event_init(event);

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period = cmask >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}
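
/*
 * Worked numbers for the non-sampling default above (illustrative,
 * assuming a 48-bit counter): cmask = 2^48 - 1, so sample_period
 * becomes 2^47 - 1 and the counter is reprogrammed long before it
 * could wrap past the previously-read value, even with very high IRQ
 * latency.
 */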