/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H
/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *  hw_idx: The low level index of raw branch records
 *          for the most recent branch.
 *          -1ULL means invalid/unknown.
 *
 * Note that nr can vary from sample to sample
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 * The entries[] is an abstraction of raw branch records,
 * which may not be stored in age order in HW, e.g. Intel LBR.
 * The hw_idx is to expose the low level index of raw
 * branch record for the most recent branch aka entries[0].
 * The hw_idx index is between -1 (unknown) and max depth,
 * which can be retrieved in /sys/devices/cpu/caps/branches.
 * For the architectures whose raw branch records are
 * already stored in age order, the hw_idx should be 0.
 */
struct perf_branch_stack {
	u64				nr;
	u64				hw_idx;
	/* flexible array: nr raw branch records, most recent first */
	struct perf_branch_entry	entries[];
};
struct task_struct;
/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	/* fixed: "unsignedint" was a whitespace-mangled "unsigned int" */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};
/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		config1;
			u64		last_tag;
			u64		dyn_constraint;
			/* fixed: "unsignedlong" -> "unsigned long" (mangled keywords) */
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* aux / Intel-PT */
			u64		aux_config;
			/*
			 * For AUX area events, aux_paused cannot be a state
			 * flag because it can be updated asynchronously to
			 * state.
			 */
			unsigned int	aux_paused;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64		pwr_acc;
			u64		ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct rhlist_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8		iommu_bank;
			u8		iommu_cntr;
			u16		padding;
			u64		conf;
			u64		conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
/* the counter is stopped */
#define PERF_HES_STOPPED	0x01

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	union {
		struct { /* Sampling */
			/*
			 * The period we started this sample with.
			 */
			u64		last_period;

			/*
			 * However much is left of the current period;
			 * note that this is a full 64bit value and
			 * allows for generation of periods longer
			 * than hardware might allow.
			 */
			local64_t	period_left;
		};
		struct { /* Topdown events counting for context switch */
			u64		saved_metric;
			u64		saved_slots;
		};
	};

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event; struct perf_event_pmu_context;
/* * Common implementation detail of pmu::{start,commit,cancel}_txn
*/
/* txn to add/schedule event on PMU */ #define PERF_PMU_TXN_ADD 0x1
/* txn to read event group from PMU */ #define PERF_PMU_TXN_READ 0x2
/* * various common per-pmu feature flags
*/ int capabilities;
/* * PMU scope
*/ unsignedint scope;
struct perf_cpu_pmu_context * __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms;
/* number of address filters this PMU can do */ unsignedint nr_addr_filters;
/* * Fully disable/enable this PMU, can be used to protect from the PMI * as well as for lazy/batch writing of the MSRs.
*/ void (*pmu_enable) (struct pmu *pmu); /* optional */ void (*pmu_disable) (struct pmu *pmu); /* optional */
/* * Try and initialize the event for this PMU. * * Returns: * -ENOENT -- @event is not for this PMU * * -ENODEV -- @event is for this PMU but PMU not present * -EBUSY -- @event is for this PMU but PMU temporarily unavailable * -EINVAL -- @event is for this PMU but @event is not valid * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported * -EACCES -- @event is for this PMU, @event is valid, but no privileges * * 0 -- @event is for this PMU and valid * * Other error return values are allowed.
*/ int (*event_init) (struct perf_event *event);
/* * Notification that the event was mapped or unmapped. Called * in the context of the mapping task.
*/ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
/* * Flags for ->add()/->del()/ ->start()/->stop(). There are * matching hw_perf_event::state flags.
*/
/* start the counter when adding */ #define PERF_EF_START 0x01
/* reload the counter when starting */ #define PERF_EF_RELOAD 0x02
/* update the counter when stopping */ #define PERF_EF_UPDATE 0x04
/* AUX area event, pause tracing */ #define PERF_EF_PAUSE 0x08
/* AUX area event, resume tracing */ #define PERF_EF_RESUME 0x10
/* * Adds/Removes a counter to/from the PMU, can be done inside a * transaction, see the ->*_txn() methods. * * The add/del callbacks will reserve all hardware resources required * to service the event, this includes any counter constraint * scheduling etc. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on. * * ->add() called without PERF_EF_START should result in the same state * as ->add() followed by ->stop(). * * ->del() must always PERF_EF_UPDATE stop an event. If it calls * ->stop() that must deal with already being stopped without * PERF_EF_UPDATE.
*/ int (*add) (struct perf_event *event, int flags); void (*del) (struct perf_event *event, int flags);
/* * Starts/Stops a counter present on the PMU. * * The PMI handler should stop the counter when perf_event_overflow() * returns !0. ->start() will be used to continue. * * Also used to change the sample period. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on -- will be called from NMI context with the PMU generates * NMIs. * * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. * * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with * PERF_EF_RESUME. * * ->start() with PERF_EF_RESUME will start as simply as possible but * only if the counter is not otherwise stopped. Will not overlap * another ->start() with PERF_EF_RESUME nor ->stop() with * PERF_EF_PAUSE. * * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other * ->stop()/->start() invocations, just not itself.
*/ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags);
/* * Updates the counter value of the event. * * For sampling capable PMUs this will also update the software period * hw_perf_event::period_left field.
*/ void (*read) (struct perf_event *event);
/* * Group events scheduling is treated as a transaction, add * group events as a whole and perform one schedulability test. * If the test fails, roll back the whole group * * Start the transaction, after this ->add() doesn't need to * do schedulability tests. * * Optional.
*/ void (*start_txn) (struct pmu *pmu, unsignedint txn_flags); /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. * * Optional.
*/ int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. * * Optional.
*/ void (*cancel_txn) (struct pmu *pmu);
/* * Will return the value for perf_event_mmap_page::index for this event, * if no implementation is provided it will default to 0 (see * perf_event_idx_default).
*/ int (*event_idx) (struct perf_event *event); /*optional */
/* * Kmem cache of PMU specific data
*/ struct kmem_cache *task_ctx_cache;
/* * Set up pmu-private data structures for an AUX area
*/ void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */
/* * Free pmu-private AUX data structures
*/ void (*free_aux) (void *aux); /* optional */
/* * Take a snapshot of the AUX buffer without touching the event * state, so that preempting ->start()/->stop() callbacks does * not interfere with their logic. Called in PMI context. * * Returns the size of AUX data copied to the output handle. * * Optional.
*/ long (*snapshot_aux) (struct perf_event *event, struct perf_output_handle *handle, unsignedlong size);
/* * Validate address range filters: make sure the HW supports the * requested configuration and number of filters; return 0 if the * supplied filters are valid, -errno otherwise. * * Runs in the context of the ioctl()ing process and is not serialized * with the rest of the PMU callbacks.
*/ int (*addr_filters_validate) (struct list_head *filters); /* optional */
/* * Synchronize address range filter configuration: * translate hw-agnostic filters into hardware configuration in * event::hw::addr_filters. * * Runs as a part of filter sync sequence that is done in ->start() * callback by calling perf_event_addr_filters_sync(). * * May (and should) traverse event::addr_filters::list, for which its * caller provides necessary serialization.
*/ void (*addr_filters_sync) (struct perf_event *event); /* optional */
/* * Check if event can be used for aux_output purposes for * events of this PMU. * * Runs from perf_event_open(). Should return 0 for "no match" * or non-zero for "match".
*/ int (*aux_output_match) (struct perf_event *event); /* optional */
/* * Skip programming this PMU on the given CPU. Typically needed for * big.LITTLE things.
*/ bool (*filter) (struct pmu *pmu, int cpu); /* optional */
/* * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
*/ int (*check_period) (struct perf_event *event, u64 value); /* optional */
};
/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct path		path;
	/* fixed: "unsignedlong" was a whitespace-mangled "unsigned long" */
	unsigned long		offset;
	unsigned long		size;
	enum perf_addr_filter_action_t	action;
};
/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	/* fixed: "unsignedint" was a whitespace-mangled "unsigned int" */
	unsigned int		nr_file_filters;
};
/*
 * The normal states are:
 *
 *            ACTIVE  --.
 *               ^      |
 *               |      |
 *      sched_{in,out}()|
 *               |      |
 *               v      |
 *     ,---> INACTIVE --+ <-.
 *     |                |   |
 *     |                {dis,en}able()
 *     sched_in()       |   |
 *     |       OFF   <--' --+
 *     |                    |
 *     `---> ERROR   ------'
 *
 * That is:
 *
 * sched_in:  INACTIVE          -> {ACTIVE,ERROR}
 * sched_out: ACTIVE            -> INACTIVE
 * disable:   {ACTIVE,INACTIVE} -> OFF
 * enable:    {OFF,ERROR}       -> INACTIVE
 *
 * Where {OFF,ERROR} are disabled states.
 *
 * Then we have the {EXIT,REVOKED,DEAD} states which are various shades of
 * defunct events:
 *
 * - EXIT means task that the even was assigned to died, but child events
 *   still live, and further children can still be created. But the event
 *   itself will never be active again. It can only transition to
 *   {REVOKED,DEAD};
 *
 * - REVOKED means the PMU the event was associated with is gone; all
 *   functionality is stopped but the event is still alive. Can only
 *   transition to DEAD;
 *
 * - DEAD event really is DYING tearing down state and freeing bits.
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -5,
	PERF_EVENT_STATE_REVOKED	= -4,	/* pmu gone, must not touch */
	PERF_EVENT_STATE_EXIT		= -3,	/* task died, still inherit */
	PERF_EVENT_STATE_ERROR		= -2,	/* scheduling error, can enable */
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};
/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
 * cannot be a group leader. If an event with this flag is detached from the
 * group it is scheduled out and moved into an unrecoverable ERROR state.
 * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
 * PMU scope where it is active.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
#define PERF_EV_CAP_SIBLING		BIT(2)
#define PERF_EV_CAP_READ_SCOPE		BIT(3)
/* * event->sibling_list is modified whole holding both ctx->lock and ctx->mutex * as such iteration must hold either lock. However, since ctx->lock is an IRQ * safe lock, and is only held by the CPU doing the modification, having IRQs * disabled is sufficient since it will hold-off the IPIs.
*/ #ifdef CONFIG_PROVE_LOCKING # define lockdep_assert_event_ctx(event) \
WARN_ON_ONCE(__lockdep_enabled && \
(this_cpu_read(hardirqs_enabled) && \
lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD)) #else # define lockdep_assert_event_ctx(event) #endif
/* * Locked for modification by both ctx->mutex and ctx->lock; holding * either sufficies for read.
*/ struct list_head sibling_list; struct list_head active_list; /* * Node on the pinned or flexible tree located at the event context;
*/ struct rb_node group_node;
u64 group_index; /* * We need storage to track the entries in perf_pmu_migrate_context; we * cannot use the event_entry because of RCU and we want to keep the * group in tact which avoids us using the other two entries.
*/ struct list_head migrate_entry;
struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings;
/* Not serialized. Only written during event initialization. */ int event_caps; /* The cumulative AND of all event_caps for events in this group. */ int group_caps;
unsignedint group_generation; struct perf_event *group_leader; /* * event->pmu will always point to pmu in which this event belongs. * Whereas event->pmu_ctx->pmu may point to other pmu when group of * different pmu events is created.
*/ struct pmu *pmu; void *pmu_private;
/* * These are the total time in nanoseconds that the event * has been enabled (i.e. eligible to run, and the task has * been scheduled in, if this is a per-task event) * and running (scheduled onto the CPU), respectively.
*/
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_context *ctx; /* * event->pmu_ctx points to perf_event_pmu_context in which the event * is added. This pmu_ctx can be of other pmu for sw event when that * sw event is part of a group which also contains non-sw events.
*/ struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/* * These accumulate total time (in nanoseconds) that children * events have been enabled and running, respectively.
*/
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
/* * Certain events gets forwarded to another pmu internally by over- * writing kernel copy of event->attr.type without user being aware * of it. event->orig_type contains original 'type' requested by * user.
*/
u32 orig_type; #endif/* CONFIG_PERF_EVENTS */
};
/*
 *           ,-----------------------[1:n]------------------------.
 *           V                                                    V
 * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
 *                              |                                |
 *                              `--[n:1]-> pmu <-[1:n]--'
 *
 *
 * struct perf_event_pmu_context lifetime is refcount based and RCU freed
 * (similar to perf_event_context). Locking is as if it were a member of
 * perf_event_context; specifically:
 *
 *   modification, both: ctx->mutex && ctx->lock
 *   reading, either:    ctx->mutex || ctx->lock
 *
 * There is one exception to this; namely put_pmu_ctx() isn't always called
 * with ctx->mutex held; this means that as long as we can guarantee the epc
 * has events the above rules hold.
 *
 * Specificially, sys_perf_event_open()'s group_leader case depends on
 * ctx->mutex pinning the configuration. Since we hold a reference on
 * group_leader (through the filedesc) it can't go away, therefore it's
 * associated pmu_ctx must exist and cannot change due to ctx->mutex.
 *
 * perf_event holds a refcount on perf_event_context
 * perf_event holds a refcount on perf_event_pmu_context
 */
struct perf_event_pmu_context {
	struct pmu			*pmu;
	struct perf_event_context	*ctx;

	/*
	 * Set when one or more (plausibly active) event can't be scheduled
	 * due to pmu overcommit or pmu constraints, except tolerant to
	 * events not necessary to be active due to scheduling constraints,
	 * such as cgroups.
	 */
	int				rotate_necessary;
};
/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups; /* cgroup evts */
#endif
	struct rcu_head			rcu_head;

	/*
	 * The count of events for which using the switch-out fast path
	 * should be avoided.
	 *
	 * Sum (event->pending_work + events with
	 *    (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
	 *
	 * The SIGTRAP is targeted at ctx->task, as such it won't do changing
	 * that until the signal is delivered.
	 */
	local_t				nr_no_switch_fast;
};
/**
 * struct perf_ctx_data - PMU specific data for a task
 * @rcu_head:  To avoid the race on free PMU specific data
 * @refcount:  To track users
 * @global:    To track system-wide users
 * @ctx_cache: Kmem cache of PMU specific data
 * @data:      PMU specific data
 *
 * Currently, the struct is only used in Intel LBR call stack mode to
 * save/restore the call stack of a task on context switches.
 *
 * The rcu_head is used to prevent the race on free the data.
 * The data only be allocated when Intel LBR call stack mode is enabled.
 * The data will be freed when the mode is disabled.
 * The content of the data will only be accessed in context switch, which
 * should be protected by rcu_read_lock().
 *
 * Because of the alignment requirement of Intel Arch LBR, the Kmem cache
 * is used to allocate the PMU specific data. The ctx_cache is to track
 * the Kmem cache.
 *
 * Careful: Struct perf_ctx_data is added as a pointer in struct task_struct.
 * When system-wide Intel LBR call stack mode is enabled, a buffer with
 * constant size will be allocated for each task.
 * Also, system memory consumption can further grow when the size of
 * struct perf_ctx_data enlarges.
 */
struct perf_ctx_data {
	struct rcu_head			rcu_head;
	refcount_t			refcount;
	int				global;
	struct kmem_cache		*ctx_cache;
	void				*data;
};
/* * Per-CPU storage for iterators used in visit_groups_merge. The default * storage is of size 2 to hold the CPU and any CPU event iterators.
*/ int heap_size; struct perf_event **heap; struct perf_event *heap_default[2];
};
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
	u64				timeoffset;
	int				active;
};
/* * Must ensure cgroup is pinned (css_get) before calling * this function. In other words, we cannot call this function * if there is no cgroup event for the current CPU context.
*/ staticinlinestruct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{ return container_of(task_css_check(task, perf_event_cgrp_id,
ctx ? lockdep_is_held(&ctx->lock)
: true), struct perf_cgroup, css);
} #endif/* CONFIG_CGROUP_PERF */
struct perf_sample_data {

	/*
	 * Fields set by perf_sample_data_init() unconditionally,
	 * group so as to minimize the cachelines touched.
	 */
	u64				sample_flags;
	u64				period;
	u64				dyn_size;

	/*
	 * Fields commonly set by __perf_event_header__init_id(),
	 * group so as to minimize the cachelines touched.
	 */
	u64				type;
	struct {
		u32	pid;
		u32	tid;
	} tid_entry;
	u64				time;
	u64				id;
	struct {
		u32	cpu;
		u32	reserved;
	} cpu_entry;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				ip;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				*br_stack_cntr;
	union perf_sample_weight	weight;
	union  perf_mem_data_src	data_src;
	u64				txn;
	/*
	 * NOTE(review): this struct is truncated in this chunk -- the
	 * remaining members and closing brace are missing from the source.
	 */
/* * The extension space for counters is appended after the * struct perf_branch_stack. It is used to store the occurrences * of events of each branch.
*/ if (brs_cntr)
size += brs->nr * sizeof(u64);
/*
 * Clear all bitfields in the perf_branch_entry.
 * The to and from fields are not cleared because they are
 * systematically modified by caller.
 *
 * Fixed: "staticinlinevoid" was a whitespace-mangled "static inline void".
 */
static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
{
	br->mispred	= 0;
	br->predicted	= 0;
	br->in_tx	= 0;
	br->abort	= 0;
	br->cycles	= 0;
	br->type	= 0;
	br->spec	= PERF_BR_SPEC_NA;
	br->reserved	= 0;
}
/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 *
 * Fixed: "staticinlinevoid" was a whitespace-mangled "static inline void".
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
	/*
	 * NOTE(review): the remainder of this function is missing from this
	 * chunk; the statements below ('event', 'ifh') belong to a different
	 * function (it looks like the tail of perf_event_addr_filters() --
	 * TODO confirm against the original header) that was spliced in by
	 * the extraction. Do not treat them as part of this function.
	 */
	if (event->parent)
		ifh = &event->parent->addr_filters;
	return ifh;
}
staticinlinestruct fasync_struct **perf_event_fasync(struct perf_event *event)
{ /* Only the parent has fasync state */ if (event->parent)
event = event->parent; return &event->fasync;
}
/* * Snapshot branch stack on software events. * * Branch stack can be very useful in understanding software events. For * example, when a long function, e.g. sys_perf_event_open, returns an * errno, it is not obvious why the function failed. Branch stack could * provide very helpful information in this type of scenarios. * * On software event, it is necessary to stop the hardware branch recorder * fast. Otherwise, the hardware register/buffer will be flushed with * entries of the triggering event. Therefore, static call is used to * stop the hardware recorder.
*/
/* * cnt is the number of entries allocated for entries. * Return number of entries copied to .
*/ typedefint (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries, unsignedint cnt);
DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
/*
 * NOTE(review): the following text is extraction residue from the web page
 * this chunk was scraped from; it is not part of the original kernel header.
 * Translated from German and wrapped in a comment so the file remains valid C:
 *
 * "The information on this web page was compiled carefully and to the best
 *  of our knowledge. However, neither completeness, nor correctness, nor
 *  quality of the provided information is guaranteed.
 *  Note: the colored syntax highlighting is still experimental."
 */