/*
 * Report the start and end of the available data in ringbuffer
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	/*
	 * Overwrite mode writes backward from 'head', so the readable
	 * region is [head, old); otherwise it is [old, head).
	 */
	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	/* Not enough new data accumulated to be worth waking the reader. */
	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			/*
			 * The buffer overflowed and the events were lost;
			 * discard the stale range and let the caller retry.
			 */
			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}
/*
 * Public wrapper: validate the map, then compute the readable data range.
 * Returns -ENOENT if the buffer is already gone, otherwise the result of
 * __perf_mmap__read_init().
 */
int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}
/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward; the last perf_mmap__read()
 * will set tail to map->core.prev. Need to correct map->core.prev to head,
 * which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/* The buffer may already be unmapped (POLLHUP/POLLERR). */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}
/* When check_messup is true, 'end' must points to a good entry */ staticunion perf_event *perf_mmap__read(struct perf_mmap *map,
u64 *startp, u64 end)
{ unsignedchar *data = map->base + page_size; union perf_event *event = NULL; int diff = end - *startp;
if (diff >= (int)sizeof(event->header)) {
size_t size;
if (size < sizeof(event->header) || diff < (int)size) return NULL;
/* * Event straddles the mmap boundary -- header should always * be inside due to u64 alignment of output.
*/ if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) { unsignedint offset = *startp; unsignedint len = size, cpy; void *dst = map->event_copy;
if (size > map->event_copy_sz) {
dst = realloc(map->event_copy, size); if (!dst) return NULL;
map->event_copy = dst;
map->event_copy_sz = size;
}
do {
cpy = min(map->mask + 1 - (offset & map->mask), len);
memcpy(dst, &data[offset & map->mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
event = (union perf_event *)map->event_copy;
}
*startp += size;
}
return event;
}
/*
 * Read event from ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	/*
	 * NOTE(review): the tail of this function was corrupted in the
	 * source (replaced by unrelated text); reconstructed to match the
	 * start/end/prev cursor protocol used by the functions above.
	 */
	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}