/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
*/ #include <linux/compiler.h> #include <linux/context_tracking.h> #include <linux/elf.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/stddef.h> #include <linux/audit.h> #include <linux/seccomp.h> #include <linux/ftrace.h>
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 *
 * @child: the tracee being detached from.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * Don't load the watchpoint registers for the ex-child: clearing
	 * TIF_LOAD_WATCH stops the context switch path from restoring any
	 * ptrace-installed hardware watchpoints for this task.
	 */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/* * Read a general register set. We always use the 64-bit format, even * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. * Registers are sign extended to fill the available space.
*/ int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{ struct pt_regs *regs; int i;
/*
 * Write a general register set.  As for PTRACE_GETREGS, the 64-bit
 * format is always used; on a 32-bit kernel only the lower order half
 * (according to endianness) of each supplied value is kept.
 *
 * Returns 0 on success, -EIO if the user buffer is not accessible.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs = task_pt_regs(child);
	int gpr;

	/* 38 doublewords: 32 GPRs + lo/hi/epc/badvaddr/status/cause. */
	if (!access_ok(data, 38 * 8))
		return -EIO;

	for (gpr = 0; gpr < 32; gpr++)
		__get_user(regs->regs[gpr], (__s64 __user *)&data->regs[gpr]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written. */

	/* The tracer may have rewritten the system call number. */
	mips_syscall_update_nr(child, regs);

	return 0;
}
int ptrace_get_watch_regs(struct task_struct *child, struct pt_watch_regs __user *addr)
{ enum pt_watch_style style; int i;
if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(addr, sizeof(struct pt_watch_regs))) return -EIO;
/*
 * NOTE(review): fragment — the enclosing function definition (apparently
 * the 32-bit regset .set handler, presumably gpr32_set(); `target',
 * `uregs', `start' and `num_regs' are declared in it) is missing from
 * this extraction.  Confirm against the original file.
 */
for (i = start; i < num_regs; i++) {
	/*
	 * Cast all values to signed here so that if this is a 64-bit
	 * kernel, the supplied 32-bit values will be sign extended.
	 */
	switch (i) {
	case MIPS32_EF_R1 ... MIPS32_EF_R25:
		/* k0/k1 are ignored. */
	case MIPS32_EF_R28 ... MIPS32_EF_R31:
		regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
		break;
	case MIPS32_EF_LO:
		regs->lo = (s32)uregs[i];
		break;
	case MIPS32_EF_HI:
		regs->hi = (s32)uregs[i];
		break;
	case MIPS32_EF_CP0_EPC:
		regs->cp0_epc = (s32)uregs[i];
		break;
	}
}

/* System call number may have been changed */
mips_syscall_update_nr(target, regs);
/*
 * NOTE(review): fragment — the enclosing function definition (apparently
 * the 64-bit regset .set handler, presumably gpr64_set(); `target',
 * `uregs', `start' and `num_regs' are declared in it) is missing from
 * this extraction.  Confirm against the original file.
 */
for (i = start; i < num_regs; i++) {
	switch (i) {
	case MIPS64_EF_R1 ... MIPS64_EF_R25:
		/* k0/k1 are ignored. */
	case MIPS64_EF_R28 ... MIPS64_EF_R31:
		regs->regs[i - MIPS64_EF_R0] = uregs[i];
		break;
	case MIPS64_EF_LO:
		regs->lo = uregs[i];
		break;
	case MIPS64_EF_HI:
		regs->hi = uregs[i];
		break;
	case MIPS64_EF_CP0_EPC:
		regs->cp0_epc = uregs[i];
		break;
	}
}

/* System call number may have been changed */
mips_syscall_update_nr(target, regs);

return 0;
}
#endif/* CONFIG_64BIT */
#ifdef CONFIG_MIPS_FP_SUPPORT
/* * Poke at FCSR according to its mask. Set the Cause bits even * if a corresponding Enable bit is set. This will be noticed at * the time the thread is switched to and SIGFPE thrown accordingly.
*/ staticvoid ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
/*
 * Copy the tracee's floating-point context to userspace: 32 64-bit FP
 * registers, then FCSR and FIR (boot CPU's fpu_id).  If the task has
 * never used FP, the registers read back as all-ones.
 *
 * Returns 0 on success, -EIO if the user buffer is not accessible.
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	__u64 __user *fp64 = (__u64 __user *)data;
	int fpr;

	/* 33 doublewords: 32 FP registers + fcr31/fpu_id word pair. */
	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (!tsk_used_math(child)) {
		/* FP not yet used: report every register as all-ones. */
		for (fpr = 0; fpr < 32; fpr++)
			__put_user((__u64)-1, fp64 + fpr);
	} else {
		union fpureg *fregs = get_fpu_regs(child);

		for (fpr = 0; fpr < 32; fpr++)
			__put_user(get_fpr64(&fregs[fpr], 0), fp64 + fpr);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}
/*
 * Write the tracee's floating-point context from userspace: 32 64-bit FP
 * registers followed by FCSR.  The FIR slot in the buffer is ignored, as
 * that register is read-only.
 *
 * Returns 0 on success, -EIO if the user buffer is not accessible.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpval;
	u32 fcr;
	int fpr;

	/* 33 doublewords: 32 FP registers + fcr31/fpu_id word pair. */
	if (!access_ok(data, 33 * 8))
		return -EIO;

	/* Make sure the tracee owns a live FP context before writing it. */
	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (fpr = 0; fpr < 32; fpr++) {
		__get_user(fpval, (__u64 __user *)data + fpr);
		set_fpr64(&fregs[fpr], 0, fpval);
	}

	__get_user(fcr, data + 64);
	ptrace_setfcr31(child, fcr);

	/* FIR may not be written. */

	return 0;
}
/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots map
 * 1:1 onto buffer slots; only the general registers are copied.
 */
static void fpr_get_fpa(struct task_struct *target, struct membuf *to)
{
	size_t gpr_bytes = NUM_FPU_REGS * sizeof(elf_fpreg_t);

	membuf_write(to, &target->thread.fpu, gpr_bytes);
}
/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only the lower 64 bits of each of the FP
 * context's general register slots are copied to buffer slots; only the
 * general registers are copied.
 */
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
	unsigned int fpr;

	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));

	for (fpr = 0; fpr < NUM_FPU_REGS; fpr++)
		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[fpr], 0));
}
/* * Copy the floating-point context to the supplied NT_PRFPREG buffer. * Choose the appropriate helper for general registers, and then copy * the FCSR and FIR registers separately.
*/ staticint fpr_get(struct task_struct *target, conststruct user_regset *regset, struct membuf to)
{ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
fpr_get_fpa(target, &to); else
fpr_get_msa(target, &to);
/* * Copy the supplied NT_PRFPREG buffer to the floating-point context, * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP * context's general register slots. Only general registers are copied.
*/ staticint fpr_set_fpa(struct task_struct *target, unsignedint *pos, unsignedint *count, constvoid **kbuf, constvoid __user **ubuf)
{ return user_regset_copyin(pos, count, kbuf, ubuf,
&target->thread.fpu,
0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
/* * Copy the supplied NT_PRFPREG buffer to the floating-point context, * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 * bits only of FP context's general register slots. Only general * registers are copied.
*/ staticint fpr_set_msa(struct task_struct *target, unsignedint *pos, unsignedint *count, constvoid **kbuf, constvoid __user **ubuf)
{ unsignedint i;
u64 fpr_val; int err;
BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
err = user_regset_copyin(pos, count, kbuf, ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t)); if (err) return err;
set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
}
return 0;
}
/* * Copy the supplied NT_PRFPREG buffer to the floating-point context. * Choose the appropriate helper for general registers, and then copy * the FCSR register separately. Ignore the incoming FIR register * contents though, as the register is read-only. * * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', * which is supposed to have been guaranteed by the kernel before * calling us, e.g. in `ptrace_regset'. We enforce that requirement, * so that we can safely avoid preinitializing temporaries for * partial register writes.
*/ staticint fpr_set(struct task_struct *target, conststruct user_regset *regset, unsignedint pos, unsignedint count, constvoid *kbuf, constvoid __user *ubuf)
{ constint fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); constint fir_pos = fcr31_pos + sizeof(u32);
u32 fcr31; int err;
BUG_ON(count % sizeof(elf_fpreg_t));
if (pos + count > sizeof(elf_fpregset_t)) return -EIO;
/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */ staticint fp_mode_get(struct task_struct *target, conststruct user_regset *regset, struct membuf to)
{ return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}
/* * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting. * * We optimize for the case where `count % sizeof(int) == 0', which * is supposed to have been guaranteed by the kernel before calling * us, e.g. in `ptrace_regset'. We enforce that requirement, so * that we can safely avoid preinitializing temporaries for partial * mode writes.
*/ staticint fp_mode_set(struct task_struct *target, conststruct user_regset *regset, unsignedint pos, unsignedint count, constvoid *kbuf, constvoid __user *ubuf)
{ int fp_mode; int err;
if (!tsk_used_math(target)) { /* The task hasn't used FP or MSA, fill with 0xff */
copy_pad_fprs(target, regset, &to, 0);
} elseif (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) { /* Copy scalar FP context, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, 8);
} elseif (sizeof(target->thread.fpu.fpr[0]) == regset->size) { /* Trivially copy the vector registers */
membuf_write(&to, &target->thread.fpu.fpr, wr_size);
} else { /* Copy as much context as possible, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
}
/** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/ int regs_query_register_offset(constchar *name)
{ conststruct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset;
switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA:
ret = generic_ptrace_peekdata(child, addr, data); break;
/* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; unsignedlong tmp = 0;
switch (addr) { case 0 ... 31:
tmp = regs->regs[addr]; break; #ifdef CONFIG_MIPS_FP_SUPPORT case FPR_BASE ... FPR_BASE + 31: { union fpureg *fregs;
if (!tsk_used_math(child)) { /* FP not yet used */
tmp = -1; break;
}
fregs = get_fpu_regs(child);
#ifdef CONFIG_32BIT if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even * registers.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1); break;
} #endif
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break;
} case FPC_CSR:
tmp = child->thread.fpu.fcr31; break; case FPC_EIR: /* implementation / version register */
tmp = boot_cpu_data.fpu_id; break; #endif case PC:
tmp = regs->cp0_epc; break; case CAUSE:
tmp = regs->cp0_cause; break; case BADVADDR:
tmp = regs->cp0_badvaddr; break; case MMHI:
tmp = regs->hi; break; case MMLO:
tmp = regs->lo; break; #ifdef CONFIG_CPU_HAS_SMARTMIPS case ACX:
tmp = regs->acx; break; #endif case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO; goto out;
}
dregs = __get_dsp_regs(child);
tmp = dregs[addr - DSP_BASE]; break;
} case DSP_CONTROL: if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO; goto out;
}
tmp = child->thread.dsp.dspcontrol; break; default:
tmp = 0;
ret = -EIO; goto out;
}
ret = put_user(tmp, datalp); break;
}
/* when I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA:
ret = generic_ptrace_pokedata(child, addr, data); break;
case PTRACE_POKEUSR: { struct pt_regs *regs;
ret = 0;
regs = task_pt_regs(child);
switch (addr) { case 0 ... 31:
regs->regs[addr] = data; /* System call number may have been changed */ if (addr == 2)
mips_syscall_update_nr(child, regs); elseif (addr == 4 &&
mips_syscall_is_indirect(child, regs))
mips_syscall_update_nr(child, regs); break; #ifdef CONFIG_MIPS_FP_SUPPORT case FPR_BASE ... FPR_BASE + 31: { union fpureg *fregs = get_fpu_regs(child);
init_fp_ctx(child); #ifdef CONFIG_32BIT if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even * registers.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data); break;
} #endif
set_fpr64(&fregs[addr - FPR_BASE], 0, data); break;
} case FPC_CSR:
init_fp_ctx(child);
ptrace_setfcr31(child, data); break; #endif case PC:
regs->cp0_epc = data; break; case MMHI:
regs->hi = data; break; case MMLO:
regs->lo = data; break; #ifdef CONFIG_CPU_HAS_SMARTMIPS case ACX:
regs->acx = data; break; #endif case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
ret = -EIO; break;
}
dregs = __get_dsp_regs(child);
dregs[addr - DSP_BASE] = data; break;
} case DSP_CONTROL: if (!cpu_has_dsp) {
ret = -EIO; break;
}
child->thread.dsp.dspcontrol = data; break; default: /* The rest are not allowed. */
ret = -EIO; break;
} break;
}
case PTRACE_GETREGS:
ret = ptrace_getregs(child, datavp); break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, datavp); break;
#ifdef CONFIG_MIPS_FP_SUPPORT case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, datavp); break;
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, datavp); break; #endif case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datalp); break;
case PTRACE_GET_WATCH_REGS:
ret = ptrace_get_watch_regs(child, addrp); break;
case PTRACE_SET_WATCH_REGS:
ret = ptrace_set_watch_regs(child, addrp); break;
/*
 * Notification of system call entry - triggered by
 * current->work.syscall_trace.
 *
 * Returns the syscall number to dispatch, or -1 if the syscall was
 * aborted by the tracer (ptrace_report_syscall_entry) or rejected by
 * seccomp (secure_computing).
 *
 * NOTE(review): syscall_trace_leave() below calls audit_syscall_exit()
 * but no matching audit_syscall_entry() call is visible here — confirm
 * against the original file; a line may have been lost in extraction.
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	/* We may enter with RCU in user ("extended quiescent") mode. */
	user_exit();

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (ptrace_report_syscall_entry(regs))
			return -1;
	}

	if (secure_computing())
		return -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (current_thread_info()->syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return current_thread_info()->syscall;
}
/*
 * Notification of system call exit - triggered by
 * current->work.syscall_trace.
 *
 * Runs audit, tracepoint and ptrace exit hooks in that order, then
 * returns RCU to user mode before going back to userspace.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	/* Re-enter RCU user mode on the way back to userspace. */
	user_enter();
}
/*
 * NOTE(review): the text below is non-code residue from the web page this
 * file was captured from (a code-browser footer), translated from German;
 * it is not part of the original kernel source and should be removed:
 *
 *   Measurement V0.5
 *   Duration of processing: 0.35 seconds (preprocessed)
 *   The information on this website has been carefully compiled to the
 *   best of our knowledge.  However, neither completeness, nor
 *   correctness, nor quality of the information provided is guaranteed.
 *   Remark: the colored syntax display and the measurement are still
 *   experimental.
 */