if (irqs_disabled()) /* * cpu_switch_mm() needs to flush the VIVT caches. To avoid * high interrupt latencies, defer the call and continue * running with the old mm. Since we only support UP systems * on non-ASID CPUs, the old mm will remain valid until the * finish_arch_post_lock_switch() call.
*/
mm->context.switch_pending = 1; else
cpu_switch_mm(mm->pgd, mm);
}
if (mm && mm->context.switch_pending) { /* * Preemption must be disabled during cpu_switch_mm() as we * have some stateful cache flush implementations. Check * switch_pending again in case we were preempted and the * switch to this mm was already done.
*/
preempt_disable(); if (mm->context.switch_pending) {
mm->context.switch_pending = 0;
cpu_switch_mm(mm->pgd, mm);
}
preempt_enable_no_resched();
}
} #endif/* !MODULE */
/* * This is the actual mm switch as far as the scheduler * is concerned. No registers are touched. We avoid * calling the CPU specific function when the mm hasn't * actually changed.
*/ staticinlinevoid
switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{ #ifdef CONFIG_MMU unsignedint cpu = smp_processor_id();
/* * __sync_icache_dcache doesn't broadcast the I-cache invalidation, * so check for possible thread migration and invalidate the I-cache * if we're new to this CPU.
*/ if (cache_ops_need_broadcast() &&
!cpumask_empty(mm_cpumask(next)) &&
!cpumask_test_cpu(cpu, mm_cpumask(next)))
__flush_icache_all();
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_and_switch_context(next, tsk); if (cache_is_vivt())
cpumask_clear_cpu(cpu, mm_cpumask(prev));
} #endif
}
/*
 * NOTE(review): the text below is a website footer picked up during
 * extraction, not part of the kernel source; kept (translated from
 * German) and commented out so the file remains valid C:
 *
 * The information on this website has been carefully compiled to the
 * best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are
 * still experimental.
 */