// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
/*
 * Free (and NULL out) the AIO data buffer at slot @idx of @map.
 * zfree() tolerates an already-NULL entry, so this is safe to call
 * on slots whose allocation never happened.
 */
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}
staticint perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused, struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
{ return 0;
} #endif
staticint perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{ int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks; if (map->aio.nr_cblocks) {
map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *)); if (!map->aio.aiocb) {
pr_debug2("failed to allocate aiocb for data buffer, error %m\n"); return -1;
}
map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb)); if (!map->aio.cblocks) {
pr_debug2("failed to allocate cblocks for data buffer, error %m\n"); return -1;
}
map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *)); if (!map->aio.data) {
pr_debug2("failed to allocate data buffer, error %m\n"); return -1;
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX); for (i = 0; i < map->aio.nr_cblocks; ++i) {
ret = perf_mmap__aio_alloc(map, i); if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m"); return -1;
}
ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity); if (ret == -1) return -1; /* * Use cblock.aio_fildes value different from -1 * to denote started aio write operation on the * cblock so it requires explicit record__aio_sync() * call prior the cblock may be reused again.
*/
map->aio.cblocks[i].aio_fildes = -1; /* * Allocate cblocks with priority delta to have * faster aio write system calls because queued requests * are kept in separate per-prio queues and adding * a new request will iterate thru shorter per-prio * list. Blocks with numbers higher than * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
*/
prio = delta_max - i;
map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
}
}
return 0;
}
staticvoid perf_mmap__aio_munmap(struct mmap *map)
{ int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
perf_mmap__aio_free(map, i); if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
zfree(&map->aio.aiocb);
} #else/* !HAVE_AIO_SUPPORT */ staticint perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{ return 0;
}
nr_cpus = perf_cpu_map__nr(cpu_map); for (idx = 0; idx < nr_cpus; idx++) {
cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */ if (cpu__get_node(cpu) == node)
__set_bit(cpu.cpu, mask->bits);
}
perf_cpu_map__put(cpu_map);
}
/*
 * NOTE(review): the following is stray German web-disclaimer text that was
 * accidentally appended to this source file (it is not code and would not
 * compile uncommented) — it should be removed entirely. English translation:
 * "The information on this web page was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed. Remark: the colored syntax
 * highlighting and the measurement are still experimental."
 */