/* Tail of queue_event() — the function header lies outside this chunk. */
/* Empty queue: the new event is trivially both first and newest. */
if (!last) {
list_add(&new->list, &oe->events);
oe->max_timestamp = timestamp; return;
}
/*
 * last event might point to some random place in the list as it's
 * the last queued event. We expect that the new event is close to
 * this.
 */
/* Walk forward from 'last' while events are not newer than ours. */
if (last->timestamp <= timestamp) { while (last->timestamp <= timestamp) {
p = last->list.next; if (p == &oe->events) {
/* Reached the list head: new event is the newest overall. */
list_add_tail(&new->list, &oe->events);
oe->max_timestamp = timestamp; return;
}
last = list_entry(p, struct ordered_event, list);
}
/* Insert just before the first strictly-newer event found. */
list_add_tail(&new->list, &last->list);
} else { while (last->timestamp > timestamp) {
/* Walk backward from 'last' while events are strictly newer. */
p = last->list.prev; if (p == &oe->events) {
/* Ran off the front: new event is the oldest; max_timestamp unchanged. */
list_add(&new->list, &oe->events); return;
}
last = list_entry(p, struct ordered_event, list);
}
/* Insert just after the found older-or-equal event. */
list_add(&new->list, &last->list);
}
}
staticunion perf_event *__dup_event(struct ordered_events *oe, union perf_event *event)
{ union perf_event *new_event = NULL;
if (oe->cur_alloc_size < oe->max_alloc_size) {
new_event = memdup(event, event->header.size); if (new_event)
oe->cur_alloc_size += event->header.size;
}
/*
 * NOTE(review): this span looks like the body of alloc_event() whose
 * header is missing from this chunk, with lines that appear to belong
 * to a buffer-freeing routine (ordered_events_buffer__free loop)
 * spliced in near the end — reconstruct against the original file.
 */
new_event = dup_event(oe, event); if (!new_event) return NULL;
/*
 * We maintain the following scheme of buffers for ordered
 * event allocation:
 *
 *   to_free list -> buffer1 (64K)
 *                   buffer2 (64K)
 *                   ...
 *
 * Each buffer keeps an array of ordered events objects:
 *   buffer -> event[0]
 *             event[1]
 *             ...
 *
 * Each allocated ordered event is linked to one of
 * following lists:
 *   - time ordered list 'events'
 *   - list of currently removed events 'cache'
 *
 * Allocation of the ordered event uses the following order
 * to get the memory:
 *   - use recently removed object from 'cache' list
 *   - use available object in current allocation buffer
 *   - allocate new buffer if the current buffer is full
 *
 * Removal of ordered event object moves it from events to
 * the cache list.
 */
size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);
/* First choice: recycle a previously removed event from the cache. */
if (!list_empty(cache)) { new = list_entry(cache->next, struct ordered_event, list);
list_del_init(&new->list);
/* NOTE(review): "elseif" is not valid C — should read "else if". */
} elseif (oe->buffer) { new = &oe->buffer->event[oe->buffer_idx]; if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
/* Buffer exhausted; force a fresh allocation next time around. */
oe->buffer = NULL;
} elseif ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
oe->buffer = malloc(size); if (!oe->buffer) {
/* malloc failed: release the duplicated event before bailing out. */
free_dup_event(oe, new_event); return NULL;
}
/*
 * Current buffer might not have all the events allocated
 * yet, we need to free only allocated ones ...
 */
if (oe->buffer) {
list_del_init(&oe->buffer->list);
ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
}
/* ... and continue with the rest */
list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
list_del_init(&buffer->list);
ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.