/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct task_struct *cpu_tasks[NR_CPUS];
EXPORT_SYMBOL(cpu_tasks);
/*
 * Release a kernel stack previously allocated from the page allocator.
 *
 * @stack: base address of the stack area
 * @order: page-allocation order the stack was allocated with
 *
 * Fix: the parameter type was mangled to "unsignedlong" (a single fused
 * token, which does not compile); restored to "unsigned long".
 */
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}
/*
 * NOTE(review): this span appears to be the tail of interrupt_end() -- the
 * function header is missing from this chunk and the lines were collapsed
 * by extraction, so the code is left byte-identical here.
 *
 * Standard exit-to-userspace work loop: handle reschedule, pending
 * signals, and notify-resume work, re-reading the thread flags after each
 * pass until no _TIF_WORK_MASK bits remain set.
 */
thread_flags = read_thread_flags(); while (thread_flags & _TIF_WORK_MASK) { if (thread_flags & _TIF_NEED_RESCHED)
schedule(); if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
thread_flags = read_thread_flags();
}
}
/* Return the PID of the task currently running on this CPU. */
int get_current_pid(void)
{
	int pid = task_pid_nr(current);

	return pid;
}
/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *);
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/*
	 * BUG FIX: fn and arg were declared but never initialized before
	 * the fn(arg) call below -- calling through an uninitialized
	 * function pointer is undefined behavior. Load them from the
	 * request stashed by copy_thread() for kernel threads.
	 * NOTE(review): the field path is assumed from the UML
	 * thread_struct layout -- confirm against asm/processor-generic.h
	 * in this tree.
	 */
	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	fn(arg);
	/* Fix: "&current" was mojibake-mangled to a currency sign. */
	userspace(&current->thread.regs.regs);
}
/* Called magically, see new_thread_handler above */
static void fork_handler(void)
{
	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/*
	 * Fixes: "staticvoid" was a fused token (does not compile), and
	 * "&current" below was mojibake-mangled to a currency sign.
	 */
	userspace(&current->thread.regs.regs);
}
/*
 * copy_thread(): initialize the child task's thread state for fork/clone.
 *
 * NOTE(review): this function is TRUNCATED in this chunk -- the body stops
 * after initializing the child's registers for the user-fork case; the
 * kernel-thread path, TLS handling, and closing braces are not visible.
 * The mangled tokens ("conststruct", "unsignedlong") are extraction damage
 * and are left untouched because the full definition cannot be
 * reconstructed safely from here.
 */
int copy_thread(struct task_struct * p, conststruct kernel_clone_args *args)
{
u64 clone_flags = args->flags; unsignedlong sp = args->stack; unsignedlong tls = args->tls; void (*handler)(void); int ret = 0;
/* Child starts from a pristine thread_struct. */
p->thread = (struct thread_struct) INIT_THREAD;
/* !args->fn means a user fork (not a kernel thread). */
if (!args->fn) {
memcpy(&p->thread.regs.regs, current_pt_regs(), sizeof(p->thread.regs.regs));
/* The child observes fork()/clone() returning 0. */
PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); if (sp != 0)
REGS_SP(p->thread.regs.regs.gp) = sp;
/*
 * Bounded copy of @size bytes from the userspace buffer @from into the
 * kernel buffer @to; forwards copy_from_user()'s result (bytes NOT copied).
 */
int copy_from_user_proc(void *to, void __user *from, int size)
{
	int uncopied = copy_from_user(to, from, size);

	return uncopied;
}
/* Report whether the current task has TIF_SINGLESTEP set. */
int singlestepping(void)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP);

	return stepping;
}
/* * Only x86 and x86_64 have an arch_align_stack(). * All other arches have "#define arch_align_stack(x) (x)" * in their asm/exec.h * As this is included in UML from asm-um/system-generic.h, * we can use it to behave as the subarch does.
*/ #ifndef arch_align_stack unsignedlong arch_align_stack(unsignedlong sp)
{ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(8192); return sp & ~0xf;
} #endif
/*
 * NOTE(review): this span looks like the interior of a get_wchan()-style
 * stack walk -- the function header, the loop increment, and the closing
 * braces are missing from this chunk, and tokens like "unsignedlong" and
 * "elseif" are extraction damage. The code is left byte-identical.
 *
 * Logic: validate the task's kernel stack page and saved SP, then scan
 * stack words upward; once past scheduler frames, the first kernel text
 * address found is returned as the wait-channel IP.
 */
stack_page = (unsignedlong) task_stack_page(p); /* Bail if the process has no kernel stack for some reason */ if (stack_page == 0) return 0;
sp = p->thread.switch_buf->JB_SP; /* * Bail if the stack pointer is below the bottom of the kernel * stack for some reason
*/ if (sp < stack_page) return 0;
while (sp < stack_page + THREAD_SIZE) {
ip = *((unsignedlong *) sp); if (in_sched_functions(ip)) /* Ignore everything until we're above the scheduler */
seen_sched = 1; elseif (kernel_text_address(ip) && seen_sched) return ip;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.