/*
 * Return true if the vPMU exposes PERF_GLOBAL_CTRL to the guest.
 *
 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
 * greater than zero.  However, KVM only exposes and emulates the MSR
 * to/for the guest if the guest PMU supports at least "Architectural
 * Performance Monitoring Version 2".
 *
 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
 */
static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	return pmu->version > 1;
}
/* * KVM tracks all counters in 64-bit bitmaps, with general purpose counters * mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0 * is tracked internally via index 32. On Intel, (AMD doesn't support fixed * counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL * and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the * amounter of boilerplate needed to iterate over PMCs *and* simplifies common * enabling/disable/reset operations. * * WARNING! This helper is only for lookups that are initiated by KVM, it is * NOT safe for guest lookups, e.g. will do the wrong thing if passed a raw * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX * for RDPMC, not by adding 32 to the fixed counter index).
*/ staticinlinestruct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{ if (idx < pmu->nr_arch_gp_counters) return &pmu->gp_counters[idx];
/* returns general purpose PMC with the specified MSR. Note that it can be * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a * parameter to tell them apart.
*/ staticinlinestruct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
{ if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
u32 index = array_index_nospec(msr - base,
pmu->nr_arch_gp_counters);
return &pmu->gp_counters[index];
}
return NULL;
}
/* returns fixed PMC with the specified MSR */ staticinlinestruct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{ int base = MSR_CORE_PERF_FIXED_CTR0;
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
u32 index = array_index_nospec(msr - base,
pmu->nr_arch_fixed_counters);
/* * Hybrid PMUs don't play nice with virtualization without careful * configuration by userspace, and KVM's APIs for reporting supported * vPMU features do not account for hybrid PMUs. Disable vPMU support * for hybrid PMUs until KVM gains a way to let userspace opt-in.
*/ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
enable_pmu = false;
if (enable_pmu) {
perf_get_x86_pmu_capability(&kvm_pmu_cap);
/* * WARN if perf did NOT disable hardware PMU if the number of * architecturally required GP counters aren't present, i.e. if * there are a non-zero number of counters, but fewer than what * is architecturally required.
*/ if (!kvm_pmu_cap.num_counters_gp ||
WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
enable_pmu = false; elseif (is_intel && !kvm_pmu_cap.version)
enable_pmu = false;
}
if (!enable_pmu) {
memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap)); return;
}
/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;
	/*
	 * NOTE(review): truncated in the visible chunk — the global_ctrl bit
	 * test for the has-global-ctrl case and the closing brace are missing.
	 * Restore from the upstream source; do not ship as-is.
	 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.