/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2004 Thiemo Seufer * Copyright (C) 2013 Imagination Technologies Ltd.
*/ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/nmi.h> #include <linux/personality.h> #include <linux/prctl.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h>
/*
 * Per-architecture thread teardown hook.
 *
 * User threads may have allocated a delay slot emulation frame; if so,
 * release that allocation here. Kernel threads (PF_KTHREAD) never have
 * one, so they are skipped.
 */
void exit_thread(struct task_struct *tsk)
{
	if (current->flags & PF_KTHREAD)
		return;

	dsemul_thread_cleanup(tsk);
}
/*
 * NOTE(review): this block appears truncated by text extraction — the end
 * of the function (the matching preempt_enable(), the dst/src copy and the
 * return) is not visible here, and "elseif" below looks like a mangled
 * "else if". Verify against the upstream file before relying on this text.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{ /* * Save any process state which is live in hardware registers to the * parent context prior to duplication. This prevents the new child * state becoming stale if the parent is preempted before copy_thread() * gets a chance to save the parent's live hardware registers to the * child context.
 */
/* Disable preemption so the live-register snapshot below is consistent. */
preempt_disable();
if (is_msa_enabled())
save_msa(current); elseif (is_fpu_owner())
_save_fp(current);
/*
 * NOTE(review): interior fragment of copy_thread() — the function header
 * and the declarations of childregs/childksp/p are not visible; extraction
 * appears to have dropped them. "unsignedlong" is a mangled
 * "unsigned long".
 */
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1; /* Put the stack after the struct pt_regs. */
childksp = (unsignedlong) childregs;
/*
 * Child's saved Status register: drop the CU1/CU2 coprocessor-enable bits
 * and keep only the kernel's coprocessor mask.
 */
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
/* * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu.
 */
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDMSA);
clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
/*
 * NOTE(review): interior fragment of the frame-analysis routine
 * (get_frame_info()) — the function header, loop setup and the surrounding
 * switch are not visible. "elseif" below is a mangled "else if".
 * The visible logic scans prologue instructions: stack-pointer moves set
 * frame_size, an RA-save instruction sets pc_offset, and a jr-ra or a jump
 * after frame setup terminates the scan.
 */
case mm_pool32b_op: switch (ip->mm_m_format.func) { case mm_swm32_func: if (ip->mm_m_format.rd < 0x10) return 0; if (ip->mm_m_format.base != 29) return 0;
if (is_jr_ra_ins(ip)) { break;
} elseif (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size); continue;
} elseif (!saw_jump && is_jump_ins(ip)) { /* * If we see a jump instruction, we are finished * with the frame save. * * Some functions can have a shortcut return at * the beginning of the function, so don't start * looking for jump instruction until we see the * frame setup. * * The RA save instruction can get put into the * delay slot of the jump instruction, so look * at the next instruction, too.
*/
saw_jump = true; continue;
} if (info->pc_offset == -1 &&
is_ra_save_ins(&insn, &info->pc_offset)) break; if (saw_jump) break;
} if (info->frame_size && info->pc_offset >= 0) /* nested */ return 0; if (info->pc_offset < 0) /* leaf */ return 1; /* prologue seems bogus... */
err: return -1;
}
/*
 * NOTE(review): tail of frame_info_init() — its header and the analysis of
 * the schedule() prologue that fills schedule_mfi are not visible here.
 * A negative pc_offset means the prologue could not be analyzed, which
 * degrades thread_saved_pc()/__get_wchan(); the init still returns 0.
 */
/* * Without schedule() frame info, result given by * thread_saved_pc() and __get_wchan() are not reliable.
*/ if (schedule_mfi.pc_offset < 0)
printk("Can't analyze schedule() prologue at %p\n", schedule);
return 0;
}
/* Run frame_info_init() during arch initcall phase at boot. */
arch_initcall(frame_info_init);
/* * Return saved PC of a blocked thread.
*/ staticunsignedlong thread_saved_pc(struct task_struct *tsk)
{ struct thread_struct *t = &tsk->thread;
/* New born processes are a special case */ if (t->reg31 == (unsignedlong) ret_from_fork) return t->reg31; if (schedule_mfi.pc_offset < 0) return 0; return ((unsignedlong *)t->reg29)[schedule_mfi.pc_offset];
}
/*
 * NOTE(review): interior fragment of the kernel stack unwinder
 * (unwind_stack_by_address()) — the function header and the declarations of
 * low/high/stack_page/regs/pc/size/ofs/info/leaf are not visible here.
 * "unsignedlong" is a mangled "unsigned long". The visible logic:
 * 1) pick IRQ-stack vs task-stack bounds, 2) hop from the IRQ stack to the
 * interrupted task frame, 3) otherwise use kallsyms + frame analysis to
 * step one frame up.
 */
/* * IRQ stacks start at IRQ_STACK_START * task stacks at THREAD_SIZE - 32
*/
low = stack_page; if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/* * If we reached the top of the interrupt stack, start unwinding * the interrupted task stack.
*/ if (unlikely(*sp == irq_stack_high)) { unsignedlong task_sp = *(unsignedlong *)*sp;
/* * Check that the pointer saved in the IRQ stack head points to * something within the stack of the current task
*/ if (!object_is_on_stack((void *)task_sp)) return 0;
/* * Follow pointer to tasks kernel stack frame where interrupted * state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc; if (!user_mode(regs) && __kernel_text_address(pc)) {
/* Resume unwinding from the interrupted context's sp/ra. */
*sp = regs->regs[29];
*ra = regs->regs[31]; return pc;
} return 0;
} if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) return 0; /* * Return ra if an exception occurred at the first instruction
*/ if (unlikely(ofs == 0)) {
pc = *ra;
*ra = 0; return pc;
}
/* Analyze the enclosing function's prologue from its start up to pc. */
info.func = (void *)(pc - ofs);
info.func_size = ofs; /* analyze from start to ofs */
leaf = get_frame_info(&info); if (leaf < 0) return 0;
if (leaf) /* * For some extreme cases, get_frame_info() can * consider wrongly a nested function as a leaf * one. In that cases avoid to return always the * same value.
*/
pc = pc != *ra ? *ra : 0; else
pc = ((unsignedlong *)(*sp))[info.pc_offset];
/*
 * NOTE(review): this function appears truncated by text extraction — the
 * guards before the loop (task state / stack page checks, the initial
 * pc/sp setup and the matching second "#ifdef CONFIG_KALLSYMS") are
 * missing, leaving the "#endif" on the unwind line unmatched and the
 * "out:" label without any visible goto. "unsignedlong" is a mangled
 * "unsigned long". Verify against the upstream file before use.
 */
/* * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
*/ unsignedlong __get_wchan(struct task_struct *task)
{ unsignedlong pc = 0; #ifdef CONFIG_KALLSYMS unsignedlong sp; unsignedlong ra = 0; #endif
/* Walk up frames until pc is outside the scheduler's own functions. */
while (in_sched_functions(pc))
pc = unwind_stack(task, &sp, pc, &ra); #endif
out: return pc;
}
/*
 * Compute the highest usable address for the current task's user stack.
 *
 * Fix: the extracted text had the fused token "unsignedlong" (twice),
 * which does not compile; restored to "unsigned long". Logic unchanged:
 * start from TASK_SIZE rounded down to a page, then reserve space (top
 * down) for the delay-slot emulation page, the VDSO + data page + optional
 * GIC user page, optional VDSO randomization slack, and cache colouring.
 */
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	if (current->thread.abi) {
		top -= PAGE_ALIGN(current->thread.abi->vdso->size);
		top -= PAGE_SIZE;
		top -= mips_gic_present() ? PAGE_SIZE : 0;

		/* Space to randomize the VDSO base */
		if (current->flags & PF_RANDOMIZE)
			top -= VDSO_RANDOMIZE_SIZE;
	}

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	return top;
}
/*
 * NOTE(review): this function appears truncated by text extraction — the
 * return statement and the closing brace are not visible, so as written it
 * falls off the end of a non-void function. "unsignedlong" is a mangled
 * "unsigned long". Verify against the upstream file before use.
 */
/* * Don't forget that the stack pointer must be aligned on a 8 bytes * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
*/ unsignedlong arch_align_stack(unsignedlong sp)
{ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
/*
 * NOTE(review): this function appears truncated by text extraction — the
 * body that actually sends the backtrace IPI (csd setup and the end of the
 * for_each_cpu loop) is not visible. "staticvoid" is a mangled
 * "static void". The visible part only skips CPUs whose previous backtrace
 * IPI is still pending in backtrace_csd_busy.
 */
staticvoid raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd; int cpu;
for_each_cpu(cpu, mask) { /* * If we previously sent an IPI to the target CPU & it hasn't * cleared its bit in the busy cpumask then it didn't handle * our previous IPI & it's not safe for us to reuse the * call_single_data_t.
*/ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
cpu); continue;
}
/*
 * Report the task's current FP mode as a PR_FP_MODE_* bitmask, derived
 * from its thread flags: FR is set unless the task uses 32-bit FP
 * registers, FRE is set when the task uses hybrid FP registers.
 */
int mips_get_process_fp_mode(struct task_struct *task)
{
	int mode = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		mode |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		mode |= PR_FP_MODE_FRE;

	return mode;
}
staticlong prepare_for_fp_mode_switch(void *unused)
{ /* * This is icky, but we use this to simply ensure that all CPUs have * context switched, regardless of whether they were previously running * kernel or user code. This ensures that no CPU that a mode-switching * program may execute on keeps its FPU enabled (& in the old mode) * throughout the mode switch.
*/ return 0;
}
/*
 * NOTE(review): interior fragment of the FP-mode-switch routine
 * (mips_set_process_fp_mode()) — the function header, the declarations of
 * value/task/t/cpu/process_cpus, the other validity checks and the final
 * return are not visible here. The visible sequence: reject FR=0 on R6
 * with an FPU, update the per-thread flags for every thread in the
 * process, then force a context switch on every CPU those threads may be
 * running on.
 */
/* FR = 0 not supported in MIPS R6 */ if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6) return -EOPNOTSUPP;
/* Indicate the new FP mode in each thread */
for_each_thread(task, t) { /* Update desired FP register width */ if (value & PR_FP_MODE_FR) {
clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
} else {
set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
}
/* Update desired FP single layout */ if (value & PR_FP_MODE_FRE)
set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); else
clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
}
/* * We need to ensure that all threads in the process have switched mode * before returning, in order to allow userland to not worry about * races. We can do this by forcing all CPUs that any thread in the * process may be running on to schedule something else - in this case * prepare_for_fp_mode_switch(). * * We begin by generating a mask of all CPUs that any thread in the * process may be running on.
*/
cpumask_clear(&process_cpus);
for_each_thread(task, t)
cpumask_set_cpu(task_cpu(t), &process_cpus);
/* * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs. * * The CPUs may have rescheduled already since we switched mode or * generated the cpumask, but that doesn't matter. If the task in this * process is scheduled out then our scheduling * prepare_for_fp_mode_switch() will simply be redundant. If it's * scheduled in then it will already have picked up the new FP mode * whilst doing so.
*/
cpus_read_lock();
for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
cpus_read_unlock();
/*
 * NOTE(review): two interior loop fragments, presumably from the 32-bit
 * and 64-bit register-dump helpers — their enclosing function headers and
 * the declarations of uregs/regs/i are not visible here. Both loops copy
 * GP registers R1..R31 into the user-format array, zeroing k0/k1
 * (R26/R27) as those kernel scratch registers are not exposed.
 */
for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) { /* k0/k1 are copied as zero. */ if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
uregs[i] = 0; else
uregs[i] = regs->regs[i - MIPS32_EF_R0];
}
for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) { /* k0/k1 are copied as zero. */ if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
uregs[i] = 0; else
uregs[i] = regs->regs[i - MIPS64_EF_R0];
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.