/*
 * Allocate backing storage for per-CPU data (paca, lppaca, SLB shadow)
 * from memblock, below @limit, node-local to @cpu where possible.
 *
 * Returns memory of at least @size bytes aligned to @align; panics on
 * allocation failure (this is very early boot, nothing better can be done).
 */
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				    unsigned long limit, int cpu)
{
	void *ptr;
	int nid;

	/*
	 * boot_cpuid paca is allocated very early before cpu_to_node is up.
	 * Set bottom-up mode, because the boot CPU should be on node-0,
	 * which will put its paca in the right place.
	 */
	if (cpu == boot_cpuid) {
		nid = NUMA_NO_NODE;
		memblock_set_bottom_up(true);
	} else {
		nid = early_cpu_to_node(cpu);
	}

	/*
	 * FIX: the previous version returned 'ptr' without ever assigning
	 * it (undefined behaviour) and never performed this allocation.
	 */
	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				     limit, nid);
	if (!ptr)
		panic("cannot allocate paca data");

	/* Restore the default allocation direction for later callers. */
	if (cpu == boot_cpuid)
		memblock_set_bottom_up(false);

	return ptr;
}
/* * See asm/lppaca.h for more detail. * * lppaca structures must be 1kB in size, L1 cache line aligned, * and not cross 4kB boundary. A 1kB size and 1kB alignment will satisfy * these requirements.
 */ staticinlinevoid init_lppaca(struct lppaca *lppaca)
/*
 * NOTE(review): "staticinlinevoid" above has lost its separating spaces,
 * and the function body below is truncated -- the lppaca field
 * initialisation and the closing brace are missing.  Recover the full
 * definition from version control rather than re-typing it.
 */
{
/* Compile-time check that struct lppaca has the expected 640-byte layout. */
BUILD_BUG_ON(sizeof(struct lppaca) != 640);
/*
 * NOTE(review): this region is whitespace-mangled ("staticstruct",
 * "unsignedlong") and new_slb_shadow() is truncated below -- the use of
 * the allocated 's', the function's return, and the closing brace (plus
 * the matching #endif for CONFIG_PPC_64S_HASH_MMU) are missing.  Recover
 * the full definition from version control.
 */
#ifdef CONFIG_PPC_64S_HASH_MMU /* * 3 persistent SLBs are allocated here. The buffer will be zero * initially, hence will all be invalid until we actually write them. * * If you make the number of persistent SLB entries dynamic, please also * update PR KVM to flush and restore them accordingly.
 */ staticstruct slb_shadow * __init new_slb_shadow(int cpu, unsignedlong limit)
{ struct slb_shadow *s;
if (cpu != boot_cpuid) { /* * Boot CPU comes here before early_radix_enabled * is parsed (e.g., for disable_radix). So allocate * always and this will be fixed up in free_unused_pacas.
 */ if (early_radix_enabled()) return NULL;
}
/* Non-boot radix CPUs bailed out above; hash CPUs get a shadow buffer. */
s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
/* The Paca is an array with one entry per processor. Each contains an * lppaca, which contains the information shared between the * hypervisor and Linux. * On systems with hardware multi-threading, there are two threads * per processor. The Paca array must contain an entry for each thread. * The VPD Areas will give a max logical processors = 2 * max physical * processors. The processor VPD array needs one entry per physical * processor (not thread).
 */ struct paca_struct **paca_ptrs __read_mostly;
/* Exported so the per-CPU paca pointers are reachable from module code. */
EXPORT_SYMBOL(paca_ptrs);
/*
 * NOTE(review): orphaned fragment -- these lines reference 'new_paca'
 * but the enclosing function (presumably the paca initialiser) has lost
 * its header and most of its body; only this Book3E-specific assignment
 * and a stray closing brace remain.  Recover from version control.
 */
#ifdef CONFIG_PPC_BOOK3E_64 /* For now -- if we have threads this will be adjusted later */
new_paca->tcd_ptr = &new_paca->tcd; #endif
}
/* Put the paca pointer into r13 and SPRG_PACA */ void setup_paca(struct paca_struct *new_paca)
{ /* Setup r13 */
local_paca = new_paca;
#ifdef CONFIG_PPC_BOOK3E_64 /* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); #else /* * In HV mode, we setup both HPACA and PACA to avoid problems * if we do a GET_PACA() before the feature fixups have been * applied. * * Normally you should test against CPU_FTR_HVMODE, but CPU features * are not yet set up when we first reach here.
*/ if (mfmsr() & MSR_HV)
mtspr(SPRN_SPRG_HPACA, local_paca); #endif
mtspr(SPRN_SPRG_PACA, local_paca);
/*
 * NOTE(review): orphaned fragment -- 'limit' is assigned here but the
 * enclosing function lost its header in the mangling, so this does not
 * parse on its own.  The '#else' and '#endif' fused onto the statement
 * lines below are also invalid: preprocessor directives must begin a
 * line.  Recover the enclosing function from version control.
 */
#ifdef CONFIG_PPC_BOOK3S_64 /* * We access pacas in real mode, and cannot take SLB faults * on them when in virtual mode, so allocate them accordingly.
 */
limit = min(ppc64_bolted_size(), ppc64_rma_size); #else
limit = ppc64_rma_size; #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.