/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
	u16	len;
	u16	offset;
#else
	u16	offset;
	u16	len;
#endif
} __packed;
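/*
 * Illustrative only (not part of this header): a minimal sketch of decoding
 * a 32-bit dynamic-field location word through struct trace_dynamic_info,
 * assuming the word packs offset and length as two u16 halves in the byte
 * order selected above. The helper name example_decode_data_loc() is
 * hypothetical.
 */
static inline struct trace_dynamic_info example_decode_data_loc(u32 data_loc)
{
	struct trace_dynamic_info info;

	/* Reinterpret the packed word as { offset, len } (swapped on big endian) */
	memcpy(&info, &data_loc, sizeof(info));
	return info;
}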
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
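/*
 * Illustrative only: every event record starts with a struct trace_entry
 * header followed by the event-specific fields. The record layout below is
 * a hypothetical sketch, not a real generated event structure.
 */
struct example_trace_event_record {
	struct trace_entry	ent;		/* common header: type, flags, preempt_count, pid */
	unsigned long		example_arg;	/* event-specific payload (assumed) */
	char			example_comm[16];
};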
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct array_buffer	*array_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;
	void			*temp;	/* temp holder */
	unsigned int		temp_size;
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;
	atomic_t		wait_index;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* Set when the file is closed to prevent new waiters */
	bool			closed;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};
static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,

	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};
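/*
 * Illustrative only: a hedged sketch of a ->reg() callback (as used by a
 * trace event class) dispatching on enum trace_reg. The function name is
 * hypothetical and the (call, type, data) signature is an assumption based
 * on how the enum values above are described.
 */
static int example_event_reg(struct trace_event_call *call,
			     enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		/* ... attach the tracing probe for this event ... */
		return 0;
	case TRACE_REG_UNREGISTER:
		/* ... detach the tracing probe ... */
		return 0;
	default:
		/* perf open/close and ADD/DEL: take the default action */
		return 0;
	}
}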
/*
 * Event flags:
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  DYNAMIC	  - Event is a dynamic event (created at run time)
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 *  EPROBE	  - Event is an event probe
 *  FPROBE	  - Event is a function probe
 *  CUSTOM	  - Event is a custom event (to be attached to an existing tracepoint)
 *		    This is set when the custom event has not been attached
 *		    to a tracepoint yet, then it is cleared when it is.
 *  TEST_STR	  - The event has a "%s" that points to a string outside the event
 */
enum {
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT),
};
struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		const char		*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	/*
	 * Static events can disappear with modules,
	 * whereas dynamic ones need their own ref count.
	 */
	union {
		void			*module;
		atomic_t		refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */
#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes false inside rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif
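/*
 * Illustrative only: the check-then-recheck pattern described in the
 * comment above, sketched with a hypothetical caller. example_run_bpf()
 * is a placeholder name, not kernel code.
 */
static void example_run_bpf(struct trace_event_call *call)
{
	/* Cheap unlocked heuristic: skip all BPF work when nothing is attached */
	if (!bpf_prog_array_valid(call))
		return;

	rcu_read_lock();
	/* ... re-check call->prog_array here and run the attached programs ... */
	rcu_read_unlock();
}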
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 *  FREED	  - File descriptor is freed, all fields should be considered invalid
 */
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
};
	/*
	 * 32 bit flags:
	 *   bit 0:	enabled
	 *   bit 1:	enabled cmd record
	 *   bit 2:	enable/disable with the soft disable bit
	 *   bit 3:	soft disabled
	 *   bit 4:	trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	refcount_t		ref;	/* ref count for opened files */
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
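/*
 * Illustrative only: the comment above asks for atomic flag updates; a
 * hedged sketch of doing that with set_bit()/clear_bit() on the
 * EVENT_FILE_FL_*_BIT numbers that back the masks, assuming the fields
 * above belong to struct trace_event_file. The helper is hypothetical.
 */
static inline void example_set_soft_disabled(struct trace_event_file *file, bool on)
{
	if (on)
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
}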
/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
			       EVENT_FILE_FL_SOFT_DISABLED |
			       EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
		return false;

	return __trace_trigger_soft_disabled(file);
}
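/*
 * Illustrative only: event probes typically call
 * trace_trigger_soft_disabled() before reserving ring-buffer space and
 * bail out early when it returns true. The probe below is a hypothetical
 * sketch, not the macro-generated kernel code.
 */
static void example_event_probe(struct trace_event_file *file)
{
	/* Fires any unfiltered triggers and tests the soft-disable state */
	if (trace_trigger_soft_disabled(file))
		return;

	/* ... reserve a ring buffer entry and write the event fields ... */
}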
extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable);
/*
 * gcc warns that you cannot use a va_list in an inlined
 * function, but it lets us make it into a macro :-/
 */
#define __trace_event_vstr_len(fmt, va)		\
({						\
	va_list __ap;				\
	int __ret;				\
\
va_copy(__ap, *(va)); \
__ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \
va_end(__ap); \
\
min(__ret, TRACE_EVENT_STR_MAX); \
})
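/*
 * Illustrative only: a hedged sketch of how __trace_event_vstr_len() might
 * be used to size a formatted string from a variadic helper.
 * example_vstr_size() is hypothetical, not a kernel function.
 */
static int example_vstr_size(const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	/* Formatted length including the NUL terminator, capped at TRACE_EVENT_STR_MAX */
	len = __trace_event_vstr_len(fmt, &ap);
	va_end(ap);

	return len;
}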
#endif /* _LINUX_TRACE_EVENT_H */
/*
 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
 *  This is due to the way trace custom events work. If a file includes two
 *  trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
 *  will override the TRACE_CUSTOM_EVENT and break the second include.
 */
#ifndef TRACE_CUSTOM_EVENT
#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */