/*
 * State machine of bkw_mmap_state:
 *
 *  .________________(forbid)_____________.
 *  |                                     V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *  ^       ^              |   ^             |
 *  |        \__(forbid)__/     \__(forbid)_/|
 *  |                                        |
 *   \_________________(3)_________________ /
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
struct event_enable_timer;

/* The tool-side event list wrapping the libperf core evlist. */
struct evlist {
	struct perf_evlist core;
	bool		 enabled;
	int		 id_pos;
	int		 is_pos;
	int		 nr_br_cntr;
	u64		 combined_sample_type;
	enum bkw_mmap_state bkw_mmap_state;
	struct	 {
		int	cork_fd;
		pid_t	pid;
	} workload;
	struct mmap *mmap;
	struct mmap *overwrite_mmap;
	struct evsel *selected;
	struct events_stats stats;
	struct perf_session *session;
	/* Optional per-tool hook to pretty-print raw sample payloads. */
	void (*trace_event_sample_raw)(struct evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample);
	u64		first_sample_time;
	u64		last_sample_time;
	struct {
		pthread_t		th;
		/* fixed: was "volatileint" (fused token, compile error) */
		volatile int		done;
	} thread;
	struct {
		int	fd;	/* control file descriptor */
		int	ack;	/* ack file descriptor for control commands */
		int	pos;	/* index at evlist core object to check signals */
	} ctl_fd;
	struct event_enable_timer *eet;
	/**
	 * @metric_events: A list of struct metric_event which each have a list
	 * of struct metric_expr.
	 */
	struct rblist	metric_events;
};
/* Mmap-page parsing and ring-buffer setup/teardown.
 * Fixed fused tokens: "unsignedint", "constchar", "conststruct",
 * "unsignedlong" were compile errors. */
int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset);
unsigned long perf_event_mlock_kb_in_pages(void);
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages, bool auxtrace_overwrite,
		    int nr_cblocks, int affinity, int flush, int comp_level);
int evlist__mmap(struct evlist *evlist, unsigned int pages);
void evlist__munmap(struct evlist *evlist);

int evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			 struct perf_sample *sample);
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event,
				   u64 *timestamp);

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
/** Iterator state for evlist__for_each_cpu */
struct evlist_cpu_iterator {
	/** The list being iterated through. */
	struct evlist *container;
	/** The current evsel of the iterator. */
	struct evsel *evsel;
	/** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */
	int cpu_map_idx;
	/**
	 * The CPU map index corresponding to evlist->core.all_cpus for the
	 * current CPU. Distinct from cpu_map_idx as the evsel's cpu map may
	 * contain fewer entries.
	 */
	int evlist_cpu_map_idx;
	/** The number of CPU map entries in evlist->core.all_cpus. */
	int evlist_cpu_map_nr;
	/** The current CPU of the iterator. */
	struct perf_cpu cpu;
	/** If present, used to set the affinity when switching between CPUs. */
	struct affinity *affinity;
};
/**
 * evlist__for_each_cpu - without affinity, iterate over the evlist. With
 *                        affinity, iterate over all CPUs and then the evlist
 *                        for each evsel on that CPU. When switching between
 *                        CPUs the affinity is set to the CPU to avoid IPIs
 *                        during syscalls.
 * @evlist_cpu_itr: the iterator instance.
 * @evlist: evlist instance to iterate.
 * @affinity: NULL or used to set the affinity to the current CPU.
 */
#define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity)		\
	for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity);	\
	     !evlist_cpu_iterator__end(&evlist_cpu_itr);		\
	     evlist_cpu_iterator__next(&evlist_cpu_itr))
/* Iterator construction/advance/termination.
 * Fixed fused token: "conststruct" was a compile error. */
/** Returns an iterator set to the first CPU/evsel of evlist. */
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
/** Move to next element in iterator, updating CPU, evsel and the affinity. */
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr);
/** Returns true when iterator is at the end of the CPUs and evlist. */
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
/* Control file-descriptor (command channel) handling.
 * Fixed fused token: "constchar" was a compile error. */
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close);
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close);
int evlist__initialize_ctlfd(struct evlist *evlist, int ctl_fd, int ctl_fd_ack);
int evlist__finalize_ctlfd(struct evlist *evlist);
bool evlist__ctlfd_initialized(struct evlist *evlist);
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd);
int evlist__ctlfd_ack(struct evlist *evlist);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.

Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.