/* Number of perf_events counting hardware events */
static atomic_t num_events;

/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
 * Stub these out for now, do something more profound later.
 */
int reserve_pmc_hardware(void)
{
	return 0;
}
/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	/*
	 * Fast path: drop our reference unless we are the last user
	 * (atomic_add_unless refuses to decrement when the count is 1).
	 * The slow path takes pmc_reserve_mutex so the final decrement
	 * and release_pmc_hardware() cannot race with a concurrent
	 * reserve in event init.
	 */
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
/*
 * Translate a generic hardware cache event config into the PMU's raw
 * event encoding via the sh_pmu cache_events table.
 *
 * The config packs type (bits 0-7), op (bits 8-15) and result
 * (bits 16-23). On success, stores the raw event code in *evp and
 * returns 0. Returns -EINVAL for out-of-range fields or table entries
 * of -1, and -EOPNOTSUPP for table entries of 0 (combination not
 * supported by this PMU).
 */
static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;

	*evp = ev;

	return 0;
}
/* * See if we need to reserve the counter. * * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0; if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware())
err = -EBUSY; else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err) return err;
event->destroy = hw_perf_event_destroy;
switch (attr->type) { case PERF_TYPE_RAW:
config = attr->config & sh_pmu->raw_event_mask; break; case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(attr->config, &config); if (err) return err; break; case PERF_TYPE_HARDWARE: if (attr->config >= sh_pmu->max_events) return -EINVAL;
staticvoid sh_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx)
{
u64 prev_raw_count, new_raw_count;
s64 delta; int shift = 0;
/* * Depending on the counter configuration, they may or may not * be chained, in which case the previous counter value can be * updated underneath us if the lower-half overflows. * * Our tactic to handle this is to first atomically read and * exchange a new raw count - then add that new-prev delta * count to the generic counter atomically. * * As there is no interrupt associated with the overflow events, * this is the simplest approach for maintaining consistency.
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = sh_pmu->read(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count) goto again;
/* * Now we have the new raw value and have updated the prev * timestamp already. We can now calculate the elapsed delta * (counter-)time and add that to the generic counter. * * Careful, not all hw sign-extends above the physical width * of the count.
*/
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
/*
 * Register an SH PMU description. Only one PMU may be registered;
 * returns -EBUSY if one already is, 0 on success.
 */
int register_sh_pmu(struct sh_pmu *_pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = _pmu;

	pr_info("Performance Events: %s support registered\n", _pmu->name);

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return 0;
}
/*
 * NOTE(review): stray non-code text (a German website disclaimer) was left
 * in this file by extraction; preserved here as a comment so the file stays
 * valid C. English translation: "The information on this website was
 * compiled carefully to the best of our knowledge. However, neither the
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax display and the measurement are
 * still experimental."
 */