/* * per-CPU TSS segments. Threads are completely 'soft' on Linux, * no more per-task TSS's. The TSS size is kept cacheline-aligned * so they are allowed to end up in the .data..cacheline_aligned * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
.x86_tss = { /* * .sp0 is only used when entering ring 0 from a lower * privilege level. Since the init task never runs anything * but ring 0 code, there is no need for a valid value here. * Poison it.
*/
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
/* * this gets called so that we can store lazy state into memory and copy the * current task into the new thread.
*/ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{ /* fpu_clone() will initialize the "dst_fpu" memory */
memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(*dst), 0);
/* Is this a kernel thread? */ if (unlikely(fn)) {
fn(fn_arg); /* * A kernel thread is allowed to return here after successfully * calling kernel_execve(). Exit to userspace to complete the * execve() syscall.
*/
regs->ax = 0;
}
if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM)
set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags); #else
p->thread.sp0 = (unsignedlong) (childregs + 1);
savesegment(gs, p->thread.gs); /* * Clear all status flags including IF and set fixed bit. 64bit * does not have this initialization as the frame does not contain * flags. The flags consistency (especially vs. AC) is there * ensured via objtool, which lacks 32bit support.
*/
frame->flags = X86_EFLAGS_FIXED; #endif
/* * Allocate a new shadow stack for thread if needed. If shadow stack, * is disabled, new_ssp will remain 0, and fpu_clone() will know not to * update it.
*/
new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size); if (IS_ERR_VALUE(new_ssp)) return PTR_ERR((void *)new_ssp);
if (unlikely(args->fn)) { /* * A user space thread, but it doesn't return to * ret_after_fork(). * * In order to indicate that to tools like gdb, * we reset the stack and instruction pointers. * * It does the same kernel frame setup to return to a kernel * function that a kernel thread does.
*/
childregs->sp = 0;
childregs->ip = 0;
kthread_frame_init(frame, args->fn, args->fn_arg); return 0;
}
/* Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS)
ret = set_new_tls(p, tls);
if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
io_bitmap_share(p);
return ret;
}
/* Reset PKRU state on exec/thread flush. */
static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}
/*
 * Disable user-space RDTSC for the current task: set TIF_NOTSC and, if the
 * flag was not already set, turn on CR4.TSD on this CPU.
 */
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}
/*
 * Re-enable user-space RDTSC for the current task: clear TIF_NOTSC and, if
 * the flag was set, turn off CR4.TSD on this CPU.
 */
static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
int get_tsc_mode(unsignedlong adr)
{ unsignedint val;
if (test_thread_flag(TIF_NOTSC))
val = PR_TSC_SIGSEGV; else
val = PR_TSC_ENABLE;
/*
 * Make CPUID fault for the current task: set TIF_NOCPUID and enable CPUID
 * faulting in hardware if the flag was not already set.
 */
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}
/*
 * Stop CPUID faulting for the current task: clear TIF_NOCPUID and disable
 * CPUID faulting in hardware if the flag was set.
 */
static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}
staticint set_cpuid_mode(unsignedlong cpuid_enabled)
{ if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT)) return -ENODEV;
if (cpuid_enabled)
enable_cpuid(); else
disable_cpuid();
return 0;
}
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		/* Push the updated TIF flags into the MSRs right away. */
		speculation_ctrl_update(read_thread_flags());
	}

	/* Fresh image: drop any inherited address-tagging (LAM) mask. */
	mm_reset_untag_mask(current->mm);
}
#ifdef CONFIG_X86_IOPL_IOPERM
/* Context switch helper; @tifp is the previous task's TIF flags. */
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}
/* Copy the incoming task's I/O bitmap into the per-CPU TSS. */
static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming tasks bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}
/** * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
*/ void native_tss_update_io_bitmap(void)
{ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); struct thread_struct *t = ¤t->thread;
u16 *base = &tss->x86_tss.io_bitmap_base;
if (!test_thread_flag(TIF_IO_BITMAP)) {
native_tss_invalidate_io_bitmap(); return;
}
if (WARN_ON_ONCE(!iobm)) {
clear_thread_flag(TIF_IO_BITMAP);
native_tss_invalidate_io_bitmap();
}
/* * Only copy bitmap data when the sequence number differs. The * update time is accounted to the incoming task.
*/ if (tss->io_bitmap.prev_sequence != iobm->sequence)
tss_copy_io_bitmap(tss, iobm);
/* Enable the bitmap */
*base = IO_BITMAP_OFFSET_VALID_MAP;
}
/* * Make sure that the TSS limit is covering the IO bitmap. It might have * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O * access from user space to trigger a #GP because the bitmap is outside * the TSS limit.
*/
refresh_tss_limit();
} #else/* CONFIG_X86_IOPL_IOPERM */ staticinlinevoid switch_to_bitmap(unsignedlong tifp) { } #endif
/* * Shared state setup happens once on the first bringup * of the CPU. It's not destroyed on CPU hotunplug.
*/ if (st->shared_state) return;
raw_spin_lock_init(&st->lock);
/* * Go over HT siblings and check whether one of them has set up the * shared state pointer already.
*/
for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) { if (cpu == this_cpu) continue;
if (!per_cpu(ssb_state, cpu).shared_state) continue;
/* Link it to the state of the sibling: */
st->shared_state = per_cpu(ssb_state, cpu).shared_state; return;
}
/* * First HT sibling to come up on the core. Link shared state of * the first HT sibling to itself. The siblings on the same core * which come up later will see the shared state pointer and link * themselves to the state of this CPU.
*/
st->shared_state = st;
}
/* * Logic is: First HT sibling enables SSBD for both siblings in the core * and last sibling to disable it, disables it for the whole core. This how * MSR_SPEC_CTRL works in "hardware": * * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
*/ static __always_inline void amd_set_core_ssb_state(unsignedlong tifn)
{ struct ssb_state *st = this_cpu_ptr(&ssb_state);
u64 msr = x86_amd_ls_cfg_base;
if (!static_cpu_has(X86_FEATURE_ZEN)) {
msr |= ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrq(MSR_AMD64_LS_CFG, msr); return;
}
if (tifn & _TIF_SSBD) { /* * Since this can race with prctl(), block reentry on the * same CPU.
*/ if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) return;
msr |= x86_amd_ls_cfg_ssbd_mask;
raw_spin_lock(&st->shared_state->lock); /* First sibling enables SSBD: */ if (!st->shared_state->disable_state)
wrmsrq(MSR_AMD64_LS_CFG, msr);
st->shared_state->disable_state++;
raw_spin_unlock(&st->shared_state->lock);
} else { if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) return;
static __always_inline void amd_set_ssb_virt_state(unsignedlong tifn)
{ /* * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, * so ssbd_tif_to_spec_ctrl() just works.
*/
wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
/* Forced update. Make sure all relevant TIF flags are different */
local_irq_save(flags);
__speculation_ctrl_update(~tif, tif);
local_irq_restore(flags);
}
/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	/* Disable preemption so the MSR update hits the current CPU. */
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}
/* Enforce MSR update to ensure consistent state */
__speculation_ctrl_update(~tifn, tifn);
}
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	/* HLT with interrupts enabled; returns with interrupts enabled. */
	raw_safe_halt();
	raw_local_irq_disable();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
/* * Remove this CPU from the online mask and disable it * unconditionally. This might be redundant in case that the reboot * vector was handled late and stop_other_cpus() sent an NMI. * * According to SDM and APM NMIs can be accepted even after soft * disabling the local APIC.
*/
set_cpu_online(cpu, false);
disable_local_APIC();
mcheck_cpu_clear(c);
/* * Use wbinvd on processors that support SME. This provides support * for performing a successful kexec when going from SME inactive * to SME active (or vice-versa). The cache must be cleared so that * if there are entries with the same physical address, both with and * without the encryption bit, they don't race each other when flushed * and potentially end up with the wrong entry being committed to * memory. * * Test the CPUID bit directly because the machine might've cleared * X86_FEATURE_SME due to cmdline options.
*/ if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
wbinvd();
/* * This brings a cache line back and dirties it, but * native_stop_other_cpus() will overwrite cpus_stop_mask after it * observed that all CPUs reported stop. This write will invalidate * the related cache line on this CPU.
*/
cpumask_clear_cpu(cpu, &cpus_stop_mask);
#ifdef CONFIG_SMP if (smp_ops.stop_this_cpu) {
smp_ops.stop_this_cpu();
BUG();
} #endif
for (;;) { /* * Use native_halt() so that memory contents don't change * (stack usage and variables) after possibly issuing the * wbinvd() above.
*/
native_halt();
}
}
/*
 * Prefer MWAIT over HALT if MWAIT is supported, MWAIT_CPUID leaf
 * exists and whenever MONITOR/MWAIT extensions are present there is at
 * least one C1 substate.
 *
 * Do not prefer MWAIT if MONITOR instruction has a bug or idle=nomwait
 * is passed to kernel commandline parameter.
 */
static __init bool prefer_mwait_c1_over_halt(void)
{
	const struct cpuinfo_x86 *c = &boot_cpu_data;
	u32 eax, ebx, ecx, edx;

	/* If override is enforced on the command line, fall back to HALT. */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return false;

	/* MWAIT is not supported on this platform. Fallback to HALT */
	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return false;

	/* Monitor has a bug or APIC stops in C1E. Fallback to HALT */
	if (boot_cpu_has_bug(X86_BUG_MONITOR) || boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		return false;

	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);

	/*
	 * If MWAIT extensions are not available, it is safe to use MWAIT
	 * with EAX=0, ECX=0.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
		return true;

	/*
	 * If MWAIT extensions are available, there should be at least one
	 * MWAIT C1 substate present.
	 */
	return !!(edx & MWAIT_C1_SUBSTATE_MASK);
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (need_resched())
		return;

	x86_idle_clear_cpu_buffers();

	if (!current_set_polling_and_test()) {
		/* Arm MONITOR on the thread flags word. */
		const void *addr = &current_thread_info()->flags;

		/* Work around CPUs that need a CLFLUSH before MONITOR. */
		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
		__monitor(addr, 0, 0);
		if (need_resched())
			goto out;

		__sti_mwait(0, 0);
		raw_local_irq_disable();
	}

out:
	__current_clr_polling();
}
void __init select_idle_routine(void)
{ if (boot_option_idle_override == IDLE_POLL) { if (IS_ENABLED(CONFIG_SMP) && __max_threads_per_core > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); return;
}
/* Required to guard against xen_set_default_idle() */ if (x86_idle_set()) return;
/* * AMD E400 detection needs to happen after ACPI has been enabled. If * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in * MSR_K8_INT_PENDING_MSG.
*/
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); if (!(lo & K8_INTP_C1E_ACTIVE_MASK)) return;
boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
if (IS_ENABLED(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE))
static_branch_enable(&arch_needs_tick_broadcast);
pr_info("System has AMD C1E erratum E400. Workaround enabled.\n");
}
/*
 * Parse the "idle=" kernel command line option.
 *
 * Returns 0 when the option was recognized, -EINVAL otherwise.
 *
 * NOTE(review): the "return 0;" tail and closing brace were lost in the
 * garbled source; restored to match the visible control flow — confirm
 * against the file's revision history.
 */
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/* 'idle=halt' HALT for idle. C-states are disabled. */
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/* 'idle=nomwait' disables MWAIT for idle */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else {
		return -EINVAL;
	}

	return 0;
}
/*
 * Randomize the heap (brk) base for a new mm: a 32M window for 32-bit
 * tasks, a 1G window otherwise.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (mmap_is_ia32())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 *
 * Returns the first return address outside the scheduler, or 0 if the
 * task stack could not be pinned or no such address was found.
 */
unsigned long __get_wchan(struct task_struct *p)
{
	struct unwind_state state;
	unsigned long addr = 0;

	if (!try_get_task_stack(p))
		return 0;

	/* Walk the frames, skipping addresses inside scheduler functions. */
	for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (in_sched_functions(addr))
			continue;
		break;
	}

	put_task_stack(p);

	return addr;
}
/*
 * arch_prctl() syscall: dispatch CPUID-mode and XSTATE-permission options
 * here; everything else is handled by do_arch_prctl_64() for 64-bit
 * callers and rejected for 32-bit (compat) callers.
 */
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
	case ARCH_GET_XCOMP_GUEST_PERM:
	case ARCH_REQ_XCOMP_GUEST_PERM:
		return fpu_xstate_prctl(option, arg2);
	}

	if (!in_ia32_syscall())
		return do_arch_prctl_64(current, option, arg2);

	return -EINVAL;
}
/* Stub for unimplemented syscall table slots: always fails with -ENOSYS. */
SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.17 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.