#ifdef CONFIG_PPC_PSERIES
/*
 * After saving the (host) PMU, before loading the guest PMU, flip the
 * lppaca pmcregs_in_use flag to the value the guest will run with.
 * The barrier()s are compiler barriers that keep the store to
 * pmcregs_in_use from being reordered against the surrounding PMU
 * register accesses.  NOTE(review): barrier() does not order this
 * against other CPUs — presumably only this CPU's lppaca is involved;
 * confirm against the PAPR pmcregs_in_use contract.
 */
if (kvmhv_on_pseries()) {
	barrier();
	get_lppaca()->pmcregs_in_use = load_pmu;
	barrier();
}
#endif
/*
 * Load guest PMU state.  If the VPA said the PMCs are not in use but
 * the guest tried to access them anyway, HFSCR[PM] will be set by the
 * HFAC fault so we can make forward progress.
 */
if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
	/* Program the six guest performance counters. */
	mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
	mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
	mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
	mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
	mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
	mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
	/* Monitor-control and sampling registers. */
	mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
	mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
	mtspr(SPRN_SDAR, vcpu->arch.sdar);
	mtspr(SPRN_SIAR, vcpu->arch.siar);
	mtspr(SPRN_SIER, vcpu->arch.sier[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		/* SPRs only present when the CPU reports ARCH_31. */
		mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
		mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
		mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
	}
	/*
	 * Set MMCRA then MMCR0 last — MMCR0 is the register that
	 * starts the counters, so everything else must be in place
	 * before it is written.
	 */
	mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
	mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
	/* No isync necessary because we're starting counters */
/*
 * Consult the guest's VPA (if one is pinned) to decide whether the
 * guest PMU state must be saved on this exit.
 */
lp = vcpu->arch.vpa.pinned_addr;
if (lp)
	save_pmu = lp->pmcregs_in_use;
if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
	/*
	 * Also save the PMU if this guest is capable of running nested
	 * guests.  This option is for old L1s that do not set their
	 * lppaca->pmcregs_in_use properly when entering their L2.
	 */
	save_pmu |= nesting_enabled(vcpu->kvm);
}
if (save_pmu) {
vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
} elseif (vcpu->arch.hfscr & HFSCR_PM) { /* * The guest accessed PMC SPRs without specifying they should * be preserved, or it cleared pmcregs_in_use after the last * access. Just ensure they are frozen.
*/
freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
/* * Demand-fault PMU register access in the guest. * * This is used to grab the guest's VPA pmcregs_in_use value * and reflect it into the host's VPA in the case of a nested * hypervisor. * * It also avoids having to zero-out SPRs after each guest * exit to avoid side-channels when. * * This is cleared here when we exit the guest, so later HFSCR * interrupt handling can add it back to run the guest with * PM enabled next time.
*/ if (!vcpu->arch.nested)
vcpu->arch.hfscr &= ~HFSCR_PM;
} /* otherwise the PMU should still be frozen */
/* Restore the host PMU state that was captured in host_os_sprs. */
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
	/* SPRs only present when the CPU reports ARCH_31. */
	mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
	mtspr(SPRN_SIER2, host_os_sprs->sier2);
	mtspr(SPRN_SIER3, host_os_sprs->sier3);
}
/*
 * Set MMCRA then MMCR0 last, then isync() — the MMCR0 write is what
 * re-enables the host's counter configuration, so it must come after
 * everything else is in place.
 */
mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
isync();
}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.