#define REG_PC	15
#define REG_PSR	16
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif
/*
 * Maps a user-visible register name to its byte offset within
 * struct pt_regs; used by the regs_query_register_* lookups below.
 */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "r0"; NULL terminates the table */
	int offset;		/* offset of the register within struct pt_regs */
};
/** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/ int regs_query_register_offset(constchar *name)
{ conststruct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL;
}
/** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/ constchar *regs_query_register_name(unsignedint offset)
{ conststruct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL;
}
/** * regs_within_kernel_stack() - check the address in the stack * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). * If @addr is within the kernel stack, it returns true. If not, returns false.
*/ bool regs_within_kernel_stack(struct pt_regs *regs, unsignedlong addr)
{ return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	/* Refuse to dereference anything that has walked off the stack. */
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
/*
 * this routine will get a word off of the processes privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	/* uregs[] is the saved user register file; offset is a register index. */
	return task_pt_regs(task)->uregs[offset];
}
/*
 * this routine will put a word on the processes privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 *
 * Returns 0 on success, -EINVAL if the write would leave the saved
 * register state invalid for userspace.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	/*
	 * Validate on a scratch copy first so a rejected write leaves the
	 * task's real register state untouched.
	 */
	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}
/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}
/* * Read the word at offset "off" into the "struct user". We * actually access the pt_regs stored on the kernel stack.
*/ staticint ptrace_read_user(struct task_struct *tsk, unsignedlong off, unsignedlong __user *ret)
{ unsignedlong tmp;
/* * Write the word at offset "off" into "struct user". We * actually access the pt_regs stored on the kernel stack.
*/ staticint ptrace_write_user(struct task_struct *tsk, unsignedlong off, unsignedlong val)
{ if (off & 3 || off >= sizeof(struct user)) return -EIO;
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) return -EACCES;
iwmmxt_task_release(thread); /* force a reload */ return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	/* Fold negative watchpoint numbers above the breakpoint range. */
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	/* Drop the reserved register 0, then collapse (addr, ctrl) pairs. */
	return (num - 1) >> 1;
}
/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;

	/* Indices beyond the breakpoint range are watchpoints (negative). */
	return num > mid ? mid - num : num;
}
/* * Handle hitting a HW-breakpoint.
*/ staticvoid ptrace_hbptriggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs)
{ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); long num; int i;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) if (current->thread.debug.hbp[i] == bp) break;
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
/* * Set ptrace breakpoint pointers to zero for this task. * This is required in order to prevent child processes from unregistering * breakpoints held by their parent.
*/ void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *thread = &tsk->thread;
	int slot;

	for (slot = 0; slot < ARM_MAX_HBP_SLOTS; slot++) {
		if (!thread->debug.hbp[slot])
			continue;
		unregister_hw_breakpoint(thread->debug.hbp[slot]);
		thread->debug.hbp[slot] = NULL;
	}
}
idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL; goto out;
}
if (get_user(user_val, data)) {
ret = -EFAULT; goto out;
}
bp = tsk->thread.debug.hbp[idx]; if (!bp) {
bp = ptrace_hbp_create(tsk, implied_type); if (IS_ERR(bp)) {
ret = PTR_ERR(bp); goto out;
}
tsk->thread.debug.hbp[idx] = bp;
}
attr = bp->attr;
if (num & 0x1) { /* Address */
attr.bp_addr = user_val;
} else { /* Control */
decode_ctrl_reg(user_val, &ctrl);
ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type); if (ret) goto out;
if ((gen_type & implied_type) != gen_type) {
ret = -EINVAL; goto out;
}
#ifdef CONFIG_VFP /* * VFP register get/set implementations. * * With respect to the kernel, struct user_fp is divided into three chunks: * 16 or 32 real VFP registers (d0-d15 or d0-31) * These are transferred to/from the real registers in the task's * vfp_hard_struct. The number of registers depends on the kernel * configuration. * * 16 or 0 fake VFP registers (d16-d31 or empty) * i.e., the user_vfp structure has space for 32 registers even if * the kernel doesn't have them all. * * vfp_get() reads this chunk as zero where applicable * vfp_set() ignores this chunk * * 1 word for the FPSCR
*/ staticint vfp_get(struct task_struct *target, conststruct user_regset *regset, struct membuf to)
{ struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
/* * For vfp_set() a read-modify-write is done on the VFP registers, * in order to avoid writing back a half-modified set of registers on * failure.
*/ staticint vfp_set(struct task_struct *target, conststruct user_regset *regset, unsignedint pos, unsignedint count, constvoid *kbuf, constvoid __user *ubuf)
{ int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct new_vfp; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{ int scno;
if (test_thread_flag(TIF_SYSCALL_TRACE))
report_syscall(regs, PTRACE_SYSCALL_ENTER);
/* Do seccomp after ptrace; syscall may have changed. */ #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER if (secure_computing() == -1) return -1; #else /* XXX: remove this once OABI gets fixed */
secure_computing_strict(syscall_get_nr(current, regs)); #endif
/* Tracer or seccomp may have changed syscall. */
scno = syscall_get_nr(current, regs);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, scno);
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.41 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.