/*
 * Allowed return values from uprobe consumer's handler callback
 * with following meaning:
 *
 * UPROBE_HANDLER_REMOVE
 * - Remove the uprobe breakpoint from current->mm.
 * UPROBE_HANDLER_IGNORE
 * - Ignore ret_handler callback for this consumer.
 */
#define UPROBE_HANDLER_REMOVE		1
#define UPROBE_HANDLER_IGNORE		2
#define MAX_URETPROBE_DEPTH		64

/* Sentinel "no trampoline" address: all-ones, never a valid user vaddr. */
#define UPROBE_NO_TRAMPOLINE_VADDR	(~0UL)
struct uprobe_consumer {
	/*
	 * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
	 * unregister uprobe for current process. If UPROBE_HANDLER_REMOVE is
	 * returned, filter() callback has to be implemented as well and it
	 * should return false to "confirm" the decision to uninstall uprobe
	 * for the current process. If filter() is omitted or returns true,
	 * UPROBE_HANDLER_REMOVE is effectively ignored.
	 */
	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs,
		       __u64 *data);
	/* Fix: original had "unsignedlong" (missing space) — would not compile. */
	int (*ret_handler)(struct uprobe_consumer *self,
			   unsigned long func,
			   struct pt_regs *regs, __u64 *data);
	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);

	struct list_head cons_node;

	__u64 id;	/* set when uprobe_consumer is registered */
};
/*
 * Hybrid lifetime uprobe. Represents a uprobe instance that could be either
 * SRCU protected (with SRCU protection eventually potentially timing out),
 * refcounted using uprobe->ref, or there could be no valid uprobe (NULL).
 *
 * hprobe's internal state is set up such that background timer thread can
 * atomically "downgrade" temporarily RCU-protected uprobe into refcounted one
 * (or no uprobe, if refcounting failed).
 *
 * *stable* pointer always points to the uprobe (or could be NULL if there
 * was no valid underlying uprobe to begin with).
 *
 * *leased* pointer is the key to achieving race-free atomic lifetime state
 * transition and can have three possible states:
 *   - either the same non-NULL value as *stable*, in which case uprobe is
 *     SRCU-protected;
 *   - NULL, in which case uprobe (if there is any) is refcounted;
 *   - special __UPROBE_DEAD value, which represents an uprobe that was SRCU
 *     protected initially, but SRCU period timed out and we attempted to
 *     convert it to refcounted, but refcount_inc_not_zero() failed, because
 *     uprobe effectively went away (the last consumer unsubscribed). In this
 *     case it's important to know that *stable* pointer (which still has
 *     non-NULL uprobe pointer) shouldn't be used, because lifetime of
 *     underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an
 *     internal marker and is handled transparently by hprobe_fetch() helper.
 *
 * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for
 * SRCU unlocking.
 *
 * See hprobe_expire() and hprobe_fetch() for details of race-free uprobe
 * state transitioning details. It all hinges on atomic xchg() over *leased*
 * pointer. *stable* pointer, once initially set, is not modified concurrently.
 */
struct hprobe {
	enum hprobe_state state;
	int srcu_idx;
	struct uprobe *uprobe;
};
/* * uprobe_task: Metadata of a task while it singlesteps.
*/ struct uprobe_task { enum uprobe_task_state state;
/*
 * NOTE(review): the text below is not part of this header — it is website
 * disclaimer boilerplate picked up during extraction and should be removed
 * from the source file. English translation of the original German:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */