/*
 * Check the ASID is still valid for the context. If not generate a new ASID.
 *
 * @info: Pointer to the ASID allocator state
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct this context belongs to (passed through to the
 *      slow path on allocation)
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu,
				      struct mm_struct *mm)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(pasid);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asid is non-zero and the ASID matches the current
	 * generation, then we update the active_asid entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&active_asid(info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	/* Fast path failed (rollover or stale generation): take the slow path. */
	asid_new_context(info, pasid, cpu, mm);
}
/*
 * Initialise the ASID allocator.
 *
 * @info: Pointer to the ASID allocator state to initialise
 * @bits: Number of ASID bits available
 * @asid_per_ctxt: Number of ASIDs consumed per context
 *                 (NOTE(review): exact semantics defined by the
 *                 implementation — confirm against the .c file)
 * @flush_cpu_ctxt_cb: Callback invoked to flush the local CPU context
 *
 * Return: 0 on success, negative errno otherwise (per kernel convention;
 * verify against the implementation).
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void));
#endif
Messung V0.5
• Dauer der Verarbeitung: 0.0 Sekunden
(vorverarbeitet)
•
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.