// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
*/
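
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

/*
 * ASID allocator state: bitmap of allocated ASIDs, the current rollover
 * generation, and per-CPU tracking of active/reserved ASIDs.
 */
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION

/*
 * mm->context.id packs the rollover generation above the hardware ASID:
 * with asid_bits == 16, generation 0x30000 and hardware ASID 0x42 combine
 * (asid2ctxid) into the context id 0x30042, and ctxid2asid() recovers 0x42.
 */
#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid)	((asid) | (genid))

#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))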
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		fallthrough;
	case ID_AA64MMFR0_EL1_ASIDBITS_8:
		asid = 8;
		break;
	case ID_AA64MMFR0_EL1_ASIDBITS_16:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
*/
pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
smp_processor_id(), asid, asid_bits);
cpu_panic_kernel();
}
}

static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * In case of KPTI kernel/user ASIDs are allocated in
	 * pairs, the bottom bit distinguishes the two: if it
	 * is set, then the ASID will map only userspace. Thus
	 * mark even as reserved for kernel.
*/
memset(map, 0xaa, len);
}
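
static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

static void flush_context(void)
{
	int i;
	u64 asid;
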
/* Update the list of reserved ASIDs and the ASID bitmap. */
set_reserved_asid_bits();
for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
asid = per_cpu(reserved_asids, i);
__set_bit(ctxid2asid(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
*/
cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
*/
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
hit = true;
per_cpu(reserved_asids, cpu) = newasid;
}
}
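
	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);
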
if (asid != 0) {
u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * If it is pinned, we can keep using it. Note that reserved
		 * takes priority, because even if it is also pinned, we need to
		 * update the generation into the reserved_asids.
		 */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
			return newasid;
}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
*/
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

/* We're out of ASIDs, so increment the global generation count */
generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
&asid_generation);
flush_context();
/* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
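
set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid2ctxid(asid, generation);
}

void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;
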
if (system_supports_cnp())
cpu_set_reserved_ttbr0();
asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
*/
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
local_flush_tlb_all();
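
	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}
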
	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollover since that ASID was
		 * used. Ensure that it is still valid, or generate a new one.
*/
asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
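
	nr_pinned_asids++;
	__set_bit(ctxid2asid(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid = ctxid2asid(asid);

	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);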

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0()) {
		num_available_asids /= 2;
		if (pinned_asid_map)
set_kpti_asid_bits(pinned_asid_map);
	}
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
WARN_ON(num_available_asids - 1 <= num_possible_cpus());
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);

	/*
	 * There must always be an ASID available after rollover. Ensure that,
	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
	 * are pinned, there still is at least one empty slot in the ASID map.
*/
	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
	return 0;
}
arch_initcall(asids_update_limit);

static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
	if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
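
	pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
	nr_pinned_asids = 0;
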
	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits(asid_map);
	return 0;
}
early_initcall(asids_init);