	/* list of kprobes for multi-handler support */
	struct list_head list;

	/* count the number of times this probe was temporarily disarmed */
	unsigned long nmissed;

	/* location of the probe point */
	kprobe_opcode_t *addr;

	/* Allow user to indicate symbol name of the probe point */
	const char *symbol_name;

	/* Offset into the symbol */
	unsigned int offset;

	/* Called before addr is executed. */
	kprobe_pre_handler_t pre_handler;

	/* Called after addr is executed, unless... */
	kprobe_post_handler_t post_handler;

	/* Saved opcode (which has been replaced with breakpoint) */
	kprobe_opcode_t opcode;

	/* copy of the original instruction */
	struct arch_specific_insn ainsn;

	/*
	 * Indicates various status flags.
	 * Protected by kprobe_mutex after this kprobe is registered.
	 */
	u32 flags;
};
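/*
 * Usage sketch (not part of the header): how a module might fill in the
 * fields above and register the probe. The handler names and the probed
 * symbol are illustrative, following the pattern of
 * samples/kprobes/kprobe_example.c.
 */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: hit %s at %px\n", p->symbol_name, p->addr);
	return 0;	/* 0 = proceed with executing the saved opcode */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	/* runs after the original instruction has been executed */
}

static struct kprobe example_kp = {
	.symbol_name	= "kernel_clone",	/* or set .addr directly */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};
/* register_kprobe(&example_kp) arms it; unregister_kprobe() removes it. */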
/* Kprobe status flags */
#define KPROBE_FLAG_GONE		1 /* breakpoint has already gone */
#define KPROBE_FLAG_DISABLED		2 /* probe is temporarily disabled */
#define KPROBE_FLAG_OPTIMIZED		4 /*
					   * probe is really optimized.
					   * NOTE:
					   * this flag is only for optimized_kprobe.
					   */
#define KPROBE_FLAG_FTRACE		8 /* probe is using ftrace */
#define KPROBE_FLAG_ON_FUNC_ENTRY	16 /* probe is on the function entry */
/* Has this kprobe gone? */
static inline bool kprobe_gone(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled? */
static inline bool kprobe_disabled(struct kprobe *p)
{
	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
/* Is this kprobe really running the optimized path? */
static inline bool kprobe_optimized(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Does this kprobe use ftrace? */
static inline bool kprobe_ftrace(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_FTRACE;
}
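/*
 * Illustrative helper (not in the header): the predicates above spare
 * callers from testing the 'flags' bitmask directly. For example, a probe
 * can fire only while it is neither disabled nor gone:
 */
static inline bool example_kprobe_can_fire(struct kprobe *p)
{
	/* kprobe_disabled() already covers KPROBE_FLAG_GONE as well */
	return !kprobe_disabled(p);
}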
/*
 * Function-return probe -
 * Note:
 * User needs to provide a handler function, and initialize maxactive.
 * maxactive - The maximum number of instances of the probed function that
 *	       can be active concurrently.
 * nmissed - tracks the number of times the probed function's return was
 *	     ignored, due to maxactive being too low.
 */
struct kretprobe_holder {
	struct kretprobe __rcu *rp;
	struct objpool_head pool;
};
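/*
 * Usage sketch (not part of the header): a function-return probe in the
 * style of samples/kprobes/kretprobe_example.c. The names are
 * illustrative; regs_return_value() reads the probed function's return
 * value from the saved registers.
 */
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("returned %lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe example_krp = {
	.kp.symbol_name	= "kernel_clone",
	.handler	= example_ret_handler,
	.maxactive	= 16,	/* concurrent activations; see maxactive above */
};
/* After register_kretprobe(&example_krp), example_krp.nmissed counts
 * returns that were dropped because maxactive was too low. */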
#ifdef CONFIG_KRETPROBES
/* Check whether @p is used for implementing a trampoline. */
extern int arch_trampoline_kprobe(struct kprobe *p);
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
	/* rethook::data is a non-changing field, so it can be accessed freely. */
	return (struct kretprobe *)ri->node.rethook->data;
}

static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
	return ri->node.ret_addr;
}
#else
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
				   struct pt_regs *regs);
void arch_kretprobe_fixup_return(struct pt_regs *regs,
				 kprobe_opcode_t *correct_ret_addr);

void __kretprobe_trampoline(void);

/*
 * Since some architectures use structured function pointers,
 * use dereference_function_descriptor() to get the real function address.
 */
static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
	return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}
/* If the trampoline handler is called from a kprobe, use this version */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
					     void *frame_pointer);

static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
					   void *frame_pointer)
{
	unsigned long ret;
	/*
	 * Set a dummy kprobe to avoid kretprobe recursion.
	 * Since a kretprobe never runs in a kprobe handler, no kprobe must
	 * be running at this point.
	 */
	kprobe_busy_begin();
	ret = __kretprobe_trampoline_handler(regs, frame_pointer);
	kprobe_busy_end();

	return ret;
}
#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
#endif /* CONFIG_KRETPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
extern bool kprobe_ftrace_disabled __read_mostly;
extern void kprobe_ftrace_kill(void);
#else
static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	return -EINVAL;
}
static inline void kprobe_ftrace_kill(void) {}
#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
	return __this_cpu_read(current_kprobe);
}
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
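/*
 * Usage sketch (illustrative): the plural variants take an array of
 * probes and register them in one call, rolling back on failure. The
 * handler and symbols reuse the hypothetical names from the first sketch.
 */
static struct kprobe example_kp1 = {
	.symbol_name	= "kernel_clone",
	.pre_handler	= example_pre,
};
static struct kprobe example_kp2 = {
	.symbol_name	= "do_exit",
	.pre_handler	= example_pre,
};
static struct kprobe *example_probes[] = { &example_kp1, &example_kp2 };
/* register_kprobes(example_probes, 2);  ...
 * unregister_kprobes(example_probes, 2); */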
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
#define kprobe_flush_task(tk)	do {} while (0)
#else
void kprobe_flush_task(struct task_struct *tk);
#endif
void kprobe_free_init_mem(void);
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
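/*
 * Usage sketch (illustrative): a registered probe can be disarmed and
 * re-armed without unregistering it; disable_kprobe() sets
 * KPROBE_FLAG_DISABLED, enable_kprobe() clears it and arms the probe again.
 */
static int example_pause_probe(struct kprobe *kp, bool pause)
{
	return pause ? disable_kprobe(kp) : enable_kprobe(kp);
}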
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
					      unsigned int trap)
{
	if (!IS_ENABLED(CONFIG_KPROBES))
		return false;
	if (user_mode(regs))
		return false;
	/*
	 * To be potentially processing a kprobe fault and to be allowed
	 * to call kprobe_running(), we have to be non-preemptible.
	 */
	if (preemptible())
		return false;
	if (!kprobe_running())
		return false;
	return kprobe_fault_handler(regs, trap);
}