/* Switch the current MM context.
 *
 * Installs @mm as this CPU's secondary context: allocates a fresh MMU
 * context if the current one is invalid, performs the TSB context
 * switch, and flushes the local TLB the first time this CPU runs this
 * address space.  Returns early for init_mm (kernel threads keep the
 * borrowed context).  @old_mm and @tsk are unused here but are part of
 * the generic switch_mm() interface.
 */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			     struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu = smp_processor_id();

	per_cpu(per_cpu_secondary_mm, cpu) = mm;
	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *		CPU 0			CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 *	address space A via entry_lazy_tlb
	 *				run address space A
	 *				set cpu1's bit in cpu_vm_mask
	 *				flush_tlb_pending()
	 *				reset cpu_vm_mask to just cpu1
	 *				TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		cpumask_set_cpu(cpu, mm_cpumask(mm));
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
/* Adopt a new address space: identical to a full switch_mm(), with
 * tsk passed as NULL since there is no associated task at this point.
 */
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#define __HAVE_ARCH_START_CONTEXT_SWITCH staticinlinevoid arch_start_context_switch(struct task_struct *prev)
{ /* Save the current state of MCDPER register for the process * we are switching from
*/ if (adi_capable()) { registerunsignedlong tmp_mcdper;
#define finish_arch_post_lock_switch finish_arch_post_lock_switch staticinlinevoid finish_arch_post_lock_switch(void)
{ /* Restore the state of MCDPER register for the new process * just switched to.
*/ if (adi_capable()) { registerunsignedlong tmp_mcdper;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.