/* * Get the return address for a single stackframe and return a pointer to the * next frame tail.
 *
 * NOTE(review): the body below does not match this signature.  The declared
 * locals (err, user_frame_tail, buftail) are never used, while the loop uses
 * `state`, `regs` and `addr`, which are not declared here, and issues a bare
 * `return` in a function declared to return unsigned long.  This looks like
 * the user_backtrace() prologue fused with the body of a kernel callchain
 * walker (perf_callchain_kernel-style unwind loop) during extraction.
 * Restore both functions from the original source -- TODO confirm upstream.
 */ staticunsignedlong
user_backtrace(struct perf_callchain_entry_ctx *entry, unsignedlong fp)
{ unsignedlong err; unsignedlong __user *user_frame_tail; struct stack_frame buftail;
/* Walk the unwinder frame by frame, recording each return address. */
for (unwind_start(&state, current, regs);
!unwind_done(&state); unwind_next_frame(&state)) {
/* Stop on a zero address or once the callchain entry buffer is full. */
addr = unwind_get_return_address(&state); if (!addr || perf_callchain_store(entry, addr)) return;
}
}
#define LOONGARCH_MAX_HWEVENTS 32
/* Per-CPU bookkeeping for the hardware performance counters. */
struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event *events[LOONGARCH_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 */
	unsigned int saved_ctrl[LOONGARCH_MAX_HWEVENTS];
};

/* One instance per CPU; the saved control copies start out cleared. */
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};
/* The description of LoongArch performance events. */
struct loongarch_perf_event {
	unsigned int event_id;	/* Raw hardware event selector code. */
};
/*
 * Read the current value of hardware performance counter @idx.
 *
 * Returns the raw counter value, or 0 (after a one-shot warning) when
 * @idx does not name one of the four LoongArch counter CSRs.
 */
static u64 loongarch_pmu_read_counter(unsigned int idx)
{
	switch (idx) {
	case 0:
		return read_csr_perfcntr0();
	case 1:
		return read_csr_perfcntr1();
	case 2:
		return read_csr_perfcntr2();
	case 3:
		return read_csr_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}
/*
 * Write @val into hardware performance counter @idx.
 *
 * Out-of-range indices are rejected with a one-shot warning and the
 * write is dropped.
 */
static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
{
	switch (idx) {
	case 0:
		write_csr_perfcntr0(val);
		break;
	case 1:
		write_csr_perfcntr1(val);
		break;
	case 2:
		write_csr_perfcntr2(val);
		break;
	case 3:
		write_csr_perfcntr3(val);
		break;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		break;
	}
}
/*
 * Read the control register of performance counter @idx.
 *
 * Returns the control CSR value, or 0 (after a one-shot warning) when
 * @idx does not name one of the four control CSRs.
 */
static unsigned int loongarch_pmu_read_control(unsigned int idx)
{
	switch (idx) {
	case 0:
		return read_csr_perfctrl0();
	case 1:
		return read_csr_perfctrl1();
	case 2:
		return read_csr_perfctrl2();
	case 3:
		return read_csr_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}
/*
 * Write @val into the control register of performance counter @idx.
 *
 * Out-of-range indices are rejected with a one-shot warning and the
 * write is dropped.
 */
static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
{
	switch (idx) {
	case 0:
		write_csr_perfctrl0(val);
		break;
	case 1:
		write_csr_perfctrl1(val);
		break;
	case 2:
		write_csr_perfctrl2(val);
		break;
	case 3:
		write_csr_perfctrl3(val);
		break;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		break;
	}
}
/* Claim the first free hardware counter (atomic test-and-set on used_mask). */
staticint loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{ int i;
for (i = 0; i < loongarch_pmu.num_counters; i++) { if (!test_and_set_bit(i, cpuc->used_mask)) return i;
/*
 * NOTE(review): the function is truncated here -- the failure return
 * (presumably a negative errno such as -EAGAIN) and the function's
 * closing brace are missing, and the next line begins a different
 * function.  Restore the tail from the original source.
 */
}
/*
 * Program the sampling period for @event on counter @idx.
 *
 * NOTE(review): this body appears to be a fusion of two functions.  The first
 * half is the usual period/left-over computation, but `ret` is computed and
 * never returned, the counter is never written, and the tail (from the
 * PERF_HES_STOPPED check onward) reads like the body of a stop-event routine.
 * Verify against the original source before relying on this code.
 */
staticint loongarch_pmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx)
{ int ret = 0;
u64 left = local64_read(&hwc->period_left);
u64 period = hwc->sample_period;
/* Re-arm with a full period when the remaining count has underflowed. */
if (unlikely((left + period) & (1ULL << 63))) { /* left underflowed by more than period. */
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
} elseif (unlikely((left + period) <= period)) { /* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/* Clamp to what the hardware counter can actually hold. */
if (left > loongarch_pmu.max_period) {
left = loongarch_pmu.max_period;
local64_set(&hwc->period_left, left);
}
/*
 * NOTE(review): the block below (disable, final update, mark stopped)
 * does not belong in a set_period routine -- likely from a fused
 * stop-event function.
 */
if (!(hwc->state & PERF_HES_STOPPED)) { /* We are working on a local event. */
loongarch_pmu_disable_event(hwc->idx);
barrier();
loongarch_pmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
/*
 * NOTE(review): headless fragment -- no function signature is visible, and
 * the variables used here (idx, err, cpuc, hwc, event, regs, data, n,
 * counter, handled) are not declared in this span.  The first part (counter
 * allocation and event installation) reads like the body of an add-event
 * routine; the second part (pause counters, scan for overflow, irq_work_run,
 * return handled) reads like a shared-interrupt handler.  Reconstruct both
 * functions from the original source.
 */
/* To look for a free counter for this event. */
idx = loongarch_pmu_alloc_counter(cpuc, hwc); if (idx < 0) {
err = idx; goto out;
}
/* * If there is an event in the counter we are going to use then * make sure it is disabled.
 */
event->hw.idx = idx;
loongarch_pmu_disable_event(idx);
cpuc->events[idx] = event;
/* * First we pause the local counters, so that when we are locked * here, the counters are all paused. When it gets locked due to * perf_disable(), the timer interrupt handler will be delayed. * * See also loongarch_pmu_start().
 */
pause_local_counters();
regs = get_irq_regs();
perf_sample_data_init(&data, 0, 0);
/* Scan every in-use counter and service those that have overflowed. */
for (n = 0; n < loongarch_pmu.num_counters; n++) { if (test_bit(n, cpuc->used_mask)) {
counter = loongarch_pmu.read_counter(n); if (counter & loongarch_pmu.overflow) {
handle_associated_event(cpuc, n, &data, regs);
handled = IRQ_HANDLED;
}
}
}
resume_local_counters();
/* * Do all the work for the pending perf events. We can do this * in here because the performance counter interrupt is a regular * interrupt, not NMI.
 */ if (handled == IRQ_HANDLED)
irq_work_run();
return handled;
}
/*
 * Validate and initialize a perf event for the LoongArch PMU.
 *
 * NOTE(review): this body appears fused with a second function.  From the
 * "Returning LoongArch event descriptor" comment onward, the code uses
 * `pev`, `hwc` and `attr`, none of which are declared in this span -- that
 * half reads like a separate hw-event-init helper.  The function is also
 * truncated: it ends mid-body (no return, no closing brace) before appended
 * non-code text.  Restore both functions from the original source.
 */
staticint loongarch_pmu_event_init(struct perf_event *event)
{ int r, irq; unsignedlong flags;
/* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP;
/* Only raw, generic-hardware and hw-cache event types are handled here. */
switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: break;
default: /* Init it to avoid false validate_group */
event->hw.event_base = 0xffffffff; return -ENOENT;
}
if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV;
/* Returning LoongArch event descriptor for generic perf event. */ if (PERF_TYPE_HARDWARE == event->attr.type) { if (event->attr.config >= PERF_COUNT_HW_MAX) return -EINVAL;
pev = loongarch_pmu_map_general_event(event->attr.config);
} elseif (PERF_TYPE_HW_CACHE == event->attr.type) {
pev = loongarch_pmu_map_cache_event(event->attr.config);
} elseif (PERF_TYPE_RAW == event->attr.type) { /* We are working on the global raw event. */
/* Raw mapping is guarded by a mutex; released after encoding below. */
mutex_lock(&raw_event_mutex);
pev = loongarch_pmu.map_raw_event(event->attr.config);
} else { /* The event type is not (yet) supported. */ return -EOPNOTSUPP;
}
/* Mapping failure: drop the raw-event mutex (if held) and bail out. */
if (IS_ERR(pev)) { if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex); return PTR_ERR(pev);
}
/* * We allow max flexibility on how each individual counter shared * by the single CPU operates (the mode exclusion and the range).
 */
hwc->config_base = CSR_PERFCTRL_IE;
hwc->event_base = loongarch_pmu_perf_event_encode(pev); if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
/* Translate the exclude_* attributes into privilege-level enable bits. */
if (!attr->exclude_user) {
hwc->config_base |= CSR_PERFCTRL_PLV3;
hwc->config_base |= CSR_PERFCTRL_PLV2;
} if (!attr->exclude_kernel) {
hwc->config_base |= CSR_PERFCTRL_PLV0;
} if (!attr->exclude_hv) {
hwc->config_base |= CSR_PERFCTRL_PLV1;
}
hwc->config_base &= M_PERFCTL_CONFIG_MASK; /* * The event can belong to another cpu. We do not assign a local * counter for it for now.
 */
hwc->idx = -1;
hwc->config = 0;
/*
 * (Extraction artifact -- not part of this source file; translated from
 * German website boilerplate and commented out so the file stays valid C:)
 * "The information on this website was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */