#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
	 * there wasn't one allocated previously (which happens in the exec
	 * case where ctx is newly allocated).
	 *
	 * We have to be a bit careful here. We must keep the existing ids in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one. However in case of error we must free the
	 * ids we've allocated but *not* any of the existing ones (or risk a
	 * UAF). That's why we decrement i at the start of the error handling
	 * loop, to skip the id that we just tested but couldn't reallocate.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects us to return id */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}
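/*
 * Note: in the book3s64 mm_context_t, "id" and "extended_id[]" are members
 * of an anonymous union, so ctx->extended_id[0] aliases ctx->id. Storing the
 * freshly allocated id into extended_id[0] above is therefore what makes the
 * "return ctx->id" hand the new id back to the caller.
 */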
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which
	 * will have id == 0) and don't alter context slices inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		/* New exec: start with a zeroed context and fresh slices. */
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context,
		       sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* inherit subpage prot details if we have one. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set up the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
	mm->context.hash_context = NULL;
#endif

	return index;
}
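/*
 * For reference (our reading of ISA 3.0 radix): the prtb0 doubleword written
 * above packs three fields -- the radix tree size from
 * radix__get_tree_size() (the RTS field), the physical address of the
 * process's top-level page table (__pa(mm->pgd), the RPDB field), and
 * RADIX_PGD_INDEX_SIZE (log2 of the number of top-level entries, the RPDS
 * field in the low bits).
 */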
/*
 * Free the cached PTE/PMD fragment pages, if any, when the address space
 * is torn down.
 */
static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap() which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush
	 * which does a RIC=2 flush. Hence for an initialized task, we do
	 * clear any cached process table entries.
	 *
	 * The condition below handles the error case during task init. We have
	 * set the process table entry early and if we fail a task
	 * initialization, we need to ensure the process table entry is zeroed.
	 * We need not worry about process table entry caches because the task
	 * never ran with the PID value.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}
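/*
 * Note: for a normally exiting task, arch_exit_mmap() below runs first (from
 * exit_mmap() at mmput() time); destroy_context() above runs later, once the
 * final mm reference is dropped in __mmdrop().
 */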
void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}
/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	/* Remove this CPU from every task's mm_cpumask ... */
	clear_tasks_mm_cpumask(cpu);
	/* ... and flush the local TLB so a later online starts clean. */
	tlbiel_all();
}
#endif