// SPDX-License-Identifier: GPL-2.0-only
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
/** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/ int regs_query_register_offset(constchar *name)
{ conststruct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL;
}
/** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/ constchar *regs_query_register_name(unsignedint offset)
{ conststruct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL;
}
/*
 * NOTE: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))
/* * Determines whether a value may be installed in a segment register.
*/ staticinlinebool invalid_selector(u16 value)
{ return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
/* * The value argument was already truncated to 16 bits.
*/ if (invalid_selector(value)) return -EIO;
/* * For %cs and %ss we cannot permit a null selector. * We can permit a bogus selector as long as it has USER_RPL. * Null selectors are fine for other segment registers, but * we will never get back to user mode with invalid %cs or %ss * and will take the trap in iret instead. Much code relies * on user_mode() to distinguish a user trap frame (which can * safely use invalid selectors) from a kernel trap frame.
*/ switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ss): if (unlikely(value == 0)) return -EIO;
fallthrough;
/* * If the user value contains TF, mark that * it was not "us" (the debugger) that set it. * If not, make sure it stays set if we had.
*/ if (value & X86_EFLAGS_TF)
clear_tsk_thread_flag(task, TIF_FORCED_TF); elseif (test_tsk_thread_flag(task, TIF_FORCED_TF))
value |= X86_EFLAGS_TF;
/* * Store in the virtual DR6 register the fact that the breakpoint * was hit so the thread's debugger will see it.
*/ for (i = 0; i < HBP_NUM; i++) { if (thread->ptrace_bps[i] == bp) break;
}
thread->virtual_dr6 |= (DR_TRAP0 << i);
}
/*
 * Walk through every ptrace breakpoints for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	/* Fold each enabled slot's len/type into its dr7 field. */
	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}
staticint ptrace_fill_bp_fields(struct perf_event_attr *attr, int len, int type, bool disabled)
{ int err, bp_len, bp_type;
/*
 * Apply a new len/type/disabled configuration to an existing ptrace
 * breakpoint by rewriting its perf attributes in place.
 * Returns 0 on success or a negative errno.
 */
static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
				    int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
/* * Handle ptrace writes to debug register 7.
*/ staticint ptrace_write_dr7(struct task_struct *tsk, unsignedlong data)
{ struct thread_struct *thread = &tsk->thread; unsignedlong old_dr7; bool second_pass = false; int i, rc, ret = 0;
data &= ~DR_CONTROL_RESERVED;
old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
rc = 0; for (i = 0; i < HBP_NUM; i++) { unsigned len, type; bool disabled = !decode_dr7(data, i, &len, &type); struct perf_event *bp = thread->ptrace_bps[i];
if (!bp) { if (disabled) continue;
bp = ptrace_register_breakpoint(tsk,
len, type, 0, disabled); if (IS_ERR(bp)) {
rc = PTR_ERR(bp); break;
}
thread->ptrace_bps[i] = bp; continue;
}
rc = ptrace_modify_breakpoint(bp, len, type, disabled); if (rc) break;
}
/* Restore if the first pass failed, second_pass shouldn't fail. */ if (rc && !WARN_ON(second_pass)) {
ret = rc;
data = old_dr7;
second_pass = true; goto restore;
}
return ret;
}
/* * Handle PTRACE_PEEKUSR calls for the debug register area.
*/ staticunsignedlong ptrace_get_debugreg(struct task_struct *tsk, int n)
{ struct thread_struct *thread = &tsk->thread; unsignedlong val = 0;
if (n < HBP_NUM) { int index = array_index_nospec(n, HBP_NUM); struct perf_event *bp = thread->ptrace_bps[index];
if (bp)
val = bp->hw.info.address;
} elseif (n == 6) {
val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */
} elseif (n == 7) {
val = thread->ptrace_dr7;
} return val;
}
if (!bp) { /* * Put stub len and type to create an inactive but correct bp. * * CHECKME: the previous code returned -EIO if the addr wasn't * a valid task virtual addr. The new one will return -EINVAL in * this case. * -EINVAL may be what we want for in-kernel breakpoints users, * but -EIO looks better for ptrace, since we refuse a register * writing for the user. And anyway this is the previous * behaviour.
*/
bp = ptrace_register_breakpoint(tsk,
X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
addr, true); if (IS_ERR(bp))
err = PTR_ERR(bp); else
t->ptrace_bps[nr] = bp;
} else { struct perf_event_attr attr = bp->attr;
/* * Handle PTRACE_POKEUSR calls for the debug register area.
*/ staticint ptrace_set_debugreg(struct task_struct *tsk, int n, unsignedlong val)
{ struct thread_struct *thread = &tsk->thread; /* There are no DR4 or DR5 registers */ int rc = -EIO;
if (n < HBP_NUM) {
rc = ptrace_set_breakpoint_addr(tsk, n, val);
} elseif (n == 6) {
thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */
rc = 0;
} elseif (n == 7) {
rc = ptrace_write_dr7(tsk, val); if (!rc)
thread->ptrace_dr7 = val;
} return rc;
}
/* * These access the current or another (stopped) task's io permission * bitmap for debugging or core dump.
*/ staticint ioperm_active(struct task_struct *target, conststruct user_regset *regset)
{ struct io_bitmap *iobm = target->thread.io_bitmap;
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user)) break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child,
regset_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child,
regset_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child,
regset_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(child,
regset_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
#ifdef CONFIG_X86_32 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ return copy_regset_to_user(child, &user_x86_32_view,
REGSET32_XFP,
0, sizeof(struct user_fxsr_struct),
datap) ? -EIO : 0;
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ return copy_regset_from_user(child, &user_x86_32_view,
REGSET32_XFP,
0, sizeof(struct user_fxsr_struct),
datap) ? -EIO : 0; #endif
#ifdefined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION case PTRACE_GET_THREAD_AREA: if ((int) addr < 0) return -EIO;
ret = do_get_thread_area(child, addr,
(struct user_desc __user *)data); break;
case PTRACE_SET_THREAD_AREA: if ((int) addr < 0) return -EIO;
ret = do_set_thread_area(child, addr,
(struct user_desc __user *)data, 0); break; #endif
#ifdef CONFIG_X86_64 /* normal 64bit interface to access TLS data. Works just like arch_prctl, except that the arguments
are reversed. */ case PTRACE_ARCH_PRCTL:
ret = do_arch_prctl_64(child, data, addr); break; #endif
default:
ret = ptrace_request(child, request, addr, data); break;
}
/* * A 32-bit ptracer on a 64-bit kernel expects that writing * FS or GS will also update the base. This is needed for * operations like PTRACE_SETREGS to fully restore a saved * CPU state.
*/
case offsetof(struct user32, regs.fs):
ret = set_segment_reg(child,
offsetof(struct user_regs_struct, fs),
value); if (ret == 0)
child->thread.fsbase =
x86_fsgsbase_read_task(child, value); return ret;
case offsetof(struct user32, regs.gs):
ret = set_segment_reg(child,
offsetof(struct user_regs_struct, gs),
value); if (ret == 0)
child->thread.gsbase =
x86_fsgsbase_read_task(child, value); return ret;
case offsetof(struct user32, regs.orig_eax): /* * Warning: bizarre corner case fixup here. A 32-bit * debugger setting orig_eax to -1 wants to disable * syscall restart. Make sure that the syscall * restart code sign-extends orig_ax. Also make sure * we interpret the -ERESTART* codes correctly if * loaded into regs->ax in case the task is not * actually still sitting at the exit from a 32-bit * syscall with TS_COMPAT still set.
*/
regs->orig_ax = value; if (syscall_get_nr(child, regs) != -1)
child->thread_info.status |= TS_I386_REGS_POKED; break;
case offsetof(struct user32, regs.eflags): return set_flags(child, value);
case offsetof(struct user32, u_debugreg[0]) ...
offsetof(struct user32, u_debugreg[7]):
regno -= offsetof(struct user32, u_debugreg[0]); return ptrace_set_debugreg(child, regno / 4, value);
switch (request) { case PTRACE_PEEKUSR:
ret = getreg32(child, addr, &val); if (ret == 0)
ret = put_user(val, (__u32 __user *)datap); break;
case PTRACE_POKEUSR:
ret = putreg32(child, addr, data); break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, &user_x86_32_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct32),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, &user_x86_32_view,
REGSET_GENERAL, 0, sizeof(struct user_regs_struct32),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, &user_x86_32_view,
REGSET_FP, 0, sizeof(struct user_i387_ia32_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(
child, &user_x86_32_view, REGSET_FP,
0, sizeof(struct user_i387_ia32_struct), datap);
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ return copy_regset_to_user(child, &user_x86_32_view,
REGSET32_XFP, 0, sizeof(struct user32_fxsr_struct),
datap);
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ return copy_regset_from_user(child, &user_x86_32_view,
REGSET32_XFP, 0, sizeof(struct user32_fxsr_struct),
datap);
case PTRACE_GET_THREAD_AREA: case PTRACE_SET_THREAD_AREA: return arch_ptrace(child, request, addr, data);
switch (request) { /* Read 32bits at location addr in the USER area. Only allow
to return the lower 32bits of segment and debug registers. */ case PTRACE_PEEKUSR: {
u32 tmp;
/* Write the word at location addr in the USER area. Only allow to update segment and debug registers with the upper 32bits
zero-extended. */ case PTRACE_POKEUSR:
ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
addr < offsetof(struct user_regs_struct, cs)) break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child,
&user_x86_64_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child,
&user_x86_64_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child,
&user_x86_64_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(child,
&user_x86_64_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
/*
 * This is used by the core dump code to decide which regset to dump.  The
 * core dump code writes out the resulting .e_machine and the corresponding
 * regsets.  This is suboptimal if the task is messing around with its CS.L
 * field, but at worst the core dump will end up missing some information.
 *
 * Unfortunately, it is also used by the broken PTRACE_GETREGSET and
 * PTRACE_SETREGSET APIs.  These APIs look at the .regsets field but have
 * no way to make sure that the e_machine they use matches the caller's
 * expectations.  The result is that the data format returned by
 * PTRACE_GETREGSET depends on the returned CS field (and even the offset
 * of the returned CS field depends on its value!) and the data format
 * accepted by PTRACE_SETREGSET is determined by the old CS value.  The
 * upshot is that it is basically impossible to use these APIs correctly.
 *
 * The best way to fix it in the long run would probably be to add new
 * improved ptrace() APIs to read and write registers reliably, possibly by
 * allowing userspace to select the ELF e_machine variant that they expect.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (!user_64bit_mode(task_pt_regs(task)))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{ struct task_struct *tsk = current;
NOTE(review): the following German boilerplate appears to be extraction
junk from a code-listing website, not part of the kernel source; it is
translated here pending removal —
"The information on this web page has been carefully compiled to the best
of our knowledge. However, neither completeness, nor correctness, nor
quality of the provided information is guaranteed.
Note: the colored syntax highlighting and the measurement are still
experimental."