struct cpu_hw_events {
        int n_events;
        int disabled;
        u8 pmcs_enabled;
        struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 0:
                val = mfpmr(PMRN_PMC0);
                break;
        case 1:
                val = mfpmr(PMRN_PMC1);
                break;
        case 2:
                val = mfpmr(PMRN_PMC2);
                break;
        case 3:
                val = mfpmr(PMRN_PMC3);
                break;
        case 4:
                val = mfpmr(PMRN_PMC4);
                break;
        case 5:
                val = mfpmr(PMRN_PMC5);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}
/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMC0, val);
                break;
        case 1:
                mtpmr(PMRN_PMC1, val);
                break;
        case 2:
                mtpmr(PMRN_PMC2, val);
                break;
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
        case 4:
                mtpmr(PMRN_PMC4, val);
                break;
        case 5:
                mtpmr(PMRN_PMC5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }

        isync();
}
/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCA0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, val);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, val);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
        }

        isync();
}
/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCB0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCB1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCB2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCB3, val);
                break;
        case 4:
                mtpmr(PMRN_PMLCB4, val);
                break;
        case 5:
                mtpmr(PMRN_PMLCB5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
        }

        isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
        s64 val, delta, prev;

        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
        do {
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
}
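
/*
 * Added illustration, not part of the original driver: because the PMCs are
 * only 32 bits wide, the delta above is computed in 64-bit arithmetic and
 * masked back to 32 bits, which stays correct even when the counter wraps.
 * For example, prev = 0xfffffff0 and val = 0x00000010 give delta = 0x20.
 * A minimal sketch of that computation (the helper name is hypothetical):
 */
static __maybe_unused u64 pmc_delta_sketch(u64 prev, u64 val)
{
        /* 64-bit subtraction, then keep the low 32 bits to handle wrap-around. */
        return (val - prev) & 0xfffffffful;
}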
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = this_cpu_ptr(&cpu_hw_events);

        if (!cpuhw->disabled) {
                cpuhw->disabled = 1;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        ppc_enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                if (atomic_read(&num_events)) {
                        /*
                         * Set the 'freeze all counters' bit, and disable
                         * interrupts.  The barrier is to make sure the
                         * mtpmr has been executed and the PMU has frozen
                         * the events before we return.
                         */
                        mtpmr(PMRN_PMGC0, PMGC0_FAC);
                        isync();
                }
        }
        local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = this_cpu_ptr(&cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;
static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *ctrs[])
{
        int n = 0;
        struct perf_event *event;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                n++;
        }
        for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = event;
                        n++;
                }
        }
        return n;
}
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
        int num_counters = ppmu->n_counter;
        u64 val;
        int i;

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
                num_counters = ppmu->n_restricted;

        /*
         * Allocate counters from top-down, so that restricted-capable
         * counters are kept free as long as possible.
         */
        for (i = num_counters - 1; i >= 0; i--) {
                if (cpuhw->event[i])
                        continue;

                break;
        }

        val = 0;
        if (event->hw.sample_period) {
                s64 left = local64_read(&event->hw.period_left);
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);

        /*
         * TODO: if at least one restricted event exists, and we
         * just freed up a non-restricted-capable counter, and
         * there is a restricted-capable counter occupied by
         * a non-restricted event, migrate that event to the
         * vacated counter.
         */

        event->hw.state = 0;
        left = local64_read(&event->hw.period_left);
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);
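
/*
 * Added illustration, not from the original file: restricted events may only
 * use counters 0 .. n_restricted-1, so scanning from the top makes ordinary
 * events prefer the high-numbered counters and leaves the restricted-capable
 * ones free for as long as possible.  A minimal sketch of the same scan over
 * a hypothetical occupancy bitmap (names here are illustrative only):
 */
static __maybe_unused int pick_counter_sketch(unsigned long used, int n_counter,
                                              int n_restricted, int restricted)
{
        int limit = restricted ? n_restricted : n_counter;
        int i;

        for (i = limit - 1; i >= 0; i--) {
                if (!(used & (1UL << i)))
                        return i;       /* highest-numbered free counter this event may use */
        }
        return -1;                      /* no usable counter is free */
}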
/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
        if (!atomic_add_unless(&num_events, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_events) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
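
/*
 * Worked example (added, not from the original file): atomic_add_unless(&num_events,
 * -1, 1) decrements the count only while it is not 1.  With three active events,
 * the first two destroys simply drop the count 3 -> 2 -> 1 without taking the
 * mutex; destroying the last event fails that fast path, takes pmc_reserve_mutex,
 * and releases the PMC hardware only if the count really reaches zero there, so
 * it cannot race with a concurrent reservation in fsl_emb_pmu_event_init() below.
 */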
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
        unsigned long type, op, result;
        int ev;

        if (!ppmu->cache_events)
                return -EINVAL;

        /* unpack config */
        type = config & 0xff;
        op = (config >> 8) & 0xff;
        result = (config >> 16) & 0xff;

        if (type >= PERF_COUNT_HW_CACHE_MAX ||
            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ev = (*ppmu->cache_events)[type][op][result];
        if (ev == 0)
                return -EOPNOTSUPP;
        if (ev == -1)
                return -EINVAL;
        *eventp = ev;
        return 0;
}
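
/*
 * Added illustration, not from the original file: a generic cache config packs
 * the cache type, operation and result into one byte each, which is exactly
 * what the "unpack config" step above undoes before the cache_events table
 * lookup.  For example, L1D read misses are encoded as PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 * A minimal sketch of the packing side (the helper name is hypothetical):
 */
static __maybe_unused u64 cache_config_sketch(u64 type, u64 op, u64 result)
{
        /* One byte per field, mirroring the unpacking above. */
        return (type & 0xff) | ((op & 0xff) << 8) | ((result & 0xff) << 16);
}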
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
        int n;
        int err;
        int num_restricted;
        int i;

        if (ppmu->n_counter > MAX_HWEVENTS) {
                WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
                        ppmu->n_counter, MAX_HWEVENTS);
                ppmu->n_counter = MAX_HWEVENTS;
        }

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;

        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
                        return err;
                break;

        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;

        default:
                return -ENOENT;
        }

        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
                return -EINVAL;

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware events in the group.  We assume the event
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
                        return -EINVAL;
        }

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
                num_restricted = 0;
                for (i = 0; i < n; i++) {
                        if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
                                num_restricted++;
                }

                if (num_restricted >= ppmu->n_restricted)
                        return -EINVAL;
        }

        if (event->attr.exclude_user)
                event->hw.config_base |= PMLCA_FCU;
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
                return -ENOTSUPP;

        /*
         * See if we need to reserve the PMU.
         * If no events are currently in use, then we have to take a
         * mutex to ensure that we don't race with another task doing
         * reserve_pmc_hardware or release_pmc_hardware.
         */
        err = 0;
        if (!atomic_inc_not_zero(&num_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_events) == 0 &&
                    reserve_pmc_hardware(perf_event_interrupt))
                        err = -EBUSY;
                else
                        atomic_inc(&num_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
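
/*
 * Worked example (added, not from the original file): atomic_inc_not_zero()
 * is the lock-free fast path for every event after the first.  If two tasks
 * create the very first events concurrently, both fail the fast path and
 * serialize on pmc_reserve_mutex: the winner sees num_events == 0 and
 * registers perf_event_interrupt via reserve_pmc_hardware() (which can fail
 * with a non-zero return if the PMC hardware is already claimed elsewhere,
 * giving -EBUSY here); the loser then sees a non-zero count under the mutex
 * and simply increments it instead of reserving again.
 */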
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
{
        u64 period = event->hw.sample_period;
        const u64 last_period = event->hw.last_period;
        s64 prev, delta, left;
        int record = 0;

        if (event->hw.state & PERF_HES_STOPPED) {
                write_pmc(event->hw.idx, 0);
                return;
        }

        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);

        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
        val = 0;
        left = local64_read(&event->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
                        if (left <= 0)
                                left = period;
                        record = 1;
                        event->hw.last_period = event->hw.sample_period;
                }
                if (left < 0x80000000LL)
                        val = 0x80000000LL - left;
        }
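
/*
 * Worked example (added, not from the original file): the counters are treated
 * as overflowed once bit 31 is set (see the (int)val < 0 test in the interrupt
 * handler below), so writing val = 0x80000000 - left arms the counter to reach
 * that point after exactly 'left' more events.  For left = 1000 this gives
 * val = 0x7ffffc18, i.e. the next performance monitor exception fires after
 * 1000 counted events.
 */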
static void perf_event_interrupt(struct pt_regs *regs)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        unsigned long val;
        int i;

        for (i = 0; i < ppmu->n_counter; ++i) {
                event = cpuhw->event[i];

                val = read_pmc(i);
                if ((int)val < 0) {
                        if (event) {
                                /* event has overflowed */
                                record_and_restart(event, val, regs);
                        } else {
                                /*
                                 * Disabled counter is negative,
                                 * reset it just in case.
                                 */
                                write_pmc(i, 0);
                        }
                }
        }

        /* PMM will keep counters frozen until we return from the interrupt. */
        mtmsr(mfmsr() | MSR_PMM);
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
        isync();
}
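
/*
 * Added note, not from the original file: the final PMGC0 write above re-arms
 * the PMU for the next overflow by setting PMGC0_PMIE (performance monitor
 * interrupt enable) and PMGC0_FCECE (freeze counters on enabled condition or
 * event); as the preceding comment says, MSR[PMM] keeps the counters frozen
 * for the remainder of the handler, and counting resumes once the interrupt
 * returns.
 */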