/* * All unused by hardware upper bits will be considered * as a software asid extension.
*/ staticinline u64 asid_version_mask(unsignedint cpu)
{ return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
}
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
*need_flush = true; /* start new asid cycle */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
/* * Initialize the context related info for a new mm_struct * instance.
*/ staticinlineint
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{ int i;
if (need_flush)
local_flush_tlb_user(); /* Flush tlb after update ASID */
/* * Mark current->active_mm as not "active" anymore. * We don't want to mislead possible IPI tlb flush routines.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 *
 * Intentionally empty: nothing is torn down per-mm here.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}
/* Activating an mm is just a context switch to it; there is no
 * per-task deactivation work, so deactivate_mm is a no-op. */
#define activate_mm(prev, next)	switch_mm(prev, next, current)
#define deactivate_mm(task, mm)	do { } while (0)
/* * If mm is currently active, we can't really drop it. * Instead, we will get a new one for it.
*/ staticinlinevoid
drop_mmu_context(struct mm_struct *mm, unsignedint cpu)
{ int asid; unsignedlong flags;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.