staticstruct paicrypt_root { /* Anchor to per CPU data */
refcount_t refcnt; /* Overall active events */ struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;
/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	/* Drop one reference; release the per CPU pointer array on the last. */
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}
/*
 * On initialization of first event also allocate per CPU data dynamically.
 * Start with an array of pointers, the array size is the maximum number of
 * CPUs possible, which might be larger than the number of CPUs currently
 * online.
 *
 * NOTE(review): the inc-not-zero / set(1) pair is not atomic by itself;
 * presumably callers serialize via pai_reserve_mutex — confirm at call sites.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}
/* Release the PMU if event is the last perf event */ static DEFINE_MUTEX(pai_reserve_mutex);
/* Adjust usage counters and remove allocated memory when all users are * gone.
*/ staticvoid paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
{ struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu); struct paicrypt_map *cpump = mp->mapptr;
/*
 * Return the value of counter number @nr from the mapped counter @page.
 * Kernel space counters are located PAI_CRYPTO_MAXCTR entries behind the
 * user space counters in the same page.
 */
static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return page[nr];
}
/* Read the counter values. Return value from location in CMP. For event
 * CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	/* NOTE(review): @event is unused in the visible code; a per event
	 * counter-selection path may have been lost — confirm upstream.
	 */
	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump->page, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}
/* Sum the kernel and/or user space counters per the event's exclude bits. */
static u64 paicrypt_getall(struct perf_event *event)
{
	u64 total = 0;

	if (!event->attr.exclude_kernel)
		total += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		total += paicrypt_getdata(event, false);

	return total;
}
/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is save to block.
 * When the event initialization functions fails, no other call back will
 * be invoked.
 *
 * Allocate the memory for the event.
 *
 * NOTE(review): this body appears truncated — "goto free_root" and
 * "goto unlock" have no matching labels, "maskptr" is never declared,
 * and "return rc" returns an int from a pointer-returning function.
 * Recover the missing section from the upstream source before relying
 * on this block.
 */
static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
{
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump) {
			rc = -ENOMEM;
			goto free_root;
		}
		INIT_LIST_HEAD(&cpump->syswide_list);
	}
	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page) {
		refcount_inc(&cpump->refcnt);
		goto unlock;
	}
	/*
	 * On error all cpumask are freed and all events have been destroyed.
	 * Save of which CPUs data structures have been allocated for.
	 * Release them in paicrypt_event_destroy call back function
	 * for this event.
	 */
	PAI_CPU_MASK(event) = maskptr;
	rc = 0;
out:
	return rc;
}
/* Might be called on different CPU than the one the event is intended for. */ staticint paicrypt_event_init(struct perf_event *event)
{ struct perf_event_attr *a = &event->attr; struct paicrypt_map *cpump; int rc = 0;
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) return -ENOENT; /* PAI crypto event must be in valid range, try others if not */ if (a->config < PAI_CRYPTO_BASE ||
a->config > PAI_CRYPTO_BASE + paicrypt_cnt) return -ENOENT; /* Allow only CRYPTO_ALL for sampling */ if (a->sample_period && a->config != PAI_CRYPTO_BASE) return -EINVAL; /* Get a page to store last counter values for sampling */ if (a->sample_period) {
PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL); if (!PAI_SAVE_AREA(event)) {
rc = -ENOMEM; goto out;
}
}
if (a->sample_period) {
a->sample_period = 1;
a->freq = 0; /* Register for paicrypt_sched_task() to be called */
event->attach_state |= PERF_ATTACH_SCHED_CB; /* Add raw data which contain the memory mapped counters */
a->sample_type |= PERF_SAMPLE_RAW; /* Turn off inheritance */
a->inherit = 0;
}
/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		/* Compute the delta; handle counter wrap-around since the
		 * previous read.
		 */
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}
/* Check if there is data to be saved on schedule out of a task. */ staticvoid paicrypt_have_sample(struct perf_event *event, struct paicrypt_map *cpump)
{
size_t rawsize;
if (!event) /* No event active */ return;
rawsize = paicrypt_copy(cpump->save, cpump->page,
(unsignedlong *)PAI_SAVE_AREA(event),
event->attr.exclude_user,
event->attr.exclude_kernel); if (rawsize) /* No incremented counters */
paicrypt_push_sample(rawsize, cpump, event);
}
/* Check if there is data to be saved on schedule out of a task. */ staticvoid paicrypt_have_samples(void)
{ struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); struct paicrypt_map *cpump = mp->mapptr; struct perf_event *event;
/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
				struct task_struct *task, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, save old values.
	 */
	if (!sched_in)
		paicrypt_have_samples();
}
/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instructions returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counters identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
/* The "event" format field spans all 64 bits of perf_event_attr::config. */
PMU_FORMAT_ATTR(event, "config:0-63");
/*
 * NOTE(review): trailing non-code text found in this file — a German web
 * page disclaimer, apparently extraction residue. Translated for reference:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax rendering and the measurement are still
 * experimental."
 */