/* * VMware Detection code. * * Copyright (C) 2008, VMware, Inc. * Author : Alok N Kataria <akataria@vmware.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. *
*/
/*
 * Per-vCPU steal-time record shared with the VMware hypervisor.
 * NOTE(review): this layout is ABI with the hypervisor -- do not
 * reorder or resize members.
 */
struct vmware_steal_time { union {
	u64 clock; /* stolen time counter in units of vtsc */ struct { /* only for little-endian */
		u32 clock_low;  /* low 32 bits, read first/last on 32-bit */
		u32 clock_high; /* high 32 bits, used for the torn-read check */
	};
};
	u64 reserved[7]; /* presumably reserved/padding -- TODO confirm against hypervisor spec */
};
/** * vmware_steal_clock() - read the per-cpu steal clock * @cpu: the cpu number whose steal clock we want to read * * The function reads the steal clock if we are on a 64-bit system, otherwise * reads it in parts, checking that the high part didn't change in the * meantime. * * Return: * The steal clock reading in ns.
*/ static u64 vmware_steal_clock(int cpu)
{ struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
u64 clock;
do {
initial_high = READ_ONCE(steal->clock_high); /* Do not reorder initial_high and high readings */
virt_rmb();
low = READ_ONCE(steal->clock_low); /* Keep low reading in between */
virt_rmb();
high = READ_ONCE(steal->clock_high);
} while (initial_high != high);
static __init int activate_jump_labels(void)
{ if (has_steal_clock) {
static_key_slow_inc(¶virt_steal_enabled); if (steal_acc)
static_key_slow_inc(¶virt_steal_rq_enabled);
}
if (vmw_sched_clock)
paravirt_set_sched_clock(vmware_sched_clock);
if (vmware_is_stealclock_available()) {
has_steal_clock = true;
static_call_update(pv_steal_clock, vmware_steal_clock);
/* We use reboot notifier only to disable steal clock */
register_reboot_notifier(&vmware_pv_reboot_nb);
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu =
vmware_smp_prepare_boot_cpu; if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/vmware:online",
vmware_cpu_online,
vmware_cpu_down_prepare) < 0)
pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n"); #else
vmware_guest_cpu_init(); #endif
}
} #else #define vmware_paravirt_ops_setup() do {} while (0) #endif
/* * VMware hypervisor takes care of exporting a reliable TSC to the guest. * Still, due to timing difference when running on virtual cpus, the TSC can * be marked as unstable in some cases. For example, the TSC sync check at * bootup can fail due to a marginal offset between vcpus' TSCs (though the * TSCs do not drift from each other). Also, the ACPI PM timer clocksource * is not suitable as a watchdog when running on a hypervisor because the * kernel may miss a wrap of the counter if the vcpu is descheduled for a * long time. To skip these checks at runtime we set these capability bits, * so that the kernel could just trust the hypervisor with providing a * reliable virtual TSC that is suitable for timekeeping.
*/ staticvoid __init vmware_set_capabilities(void)
{
setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); if (vmware_tsc_khz)
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
setup_force_cpu_cap(X86_FEATURE_VMCALL); elseif (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL);
}
/*
 * NOTE(review): this span is a fragment of the platform-setup path.
 * The enclosing function header and the branch that obtains the host
 * frequency into ecx were lost in extraction, so the braces below do
 * not balance within this view -- reconcile against the upstream file
 * before building.
 */
#ifdef CONFIG_X86_LOCAL_APIC /* Skip lapic calibration since we know the bus frequency. */
lapic_timer_period = ecx / HZ;
pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
	ecx); #endif
} else {
	pr_warn("Failed to get TSC freq from the hypervisor\n");
}
/* On SEV-SNP guests without EFI, fall back to MP-table CPU enumeration. */
if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !efi_enabled(EFI_BOOT))
	x86_init.mpparse.find_mptable = mpparse_find_mptable;
/*
 * While checking the dmi string information, just checking the product
 * serial key should be enough, as this will always have a VMware
 * specific string when running under VMware hypervisor.
 * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
 * intentionally defaults to 0.
 *
 * Fixes: "unsignedint" token damage; the DMI fallback and the return
 * paths had been truncated and are restored below per the comment
 * above -- NOTE(review): verify the tail against the upstream file.
 */
static u32 __init vmware_platform(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		unsigned int eax;
		unsigned int hyper_vendor_id[3];

		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) {
			if (eax >= CPUID_VMWARE_FEATURES_LEAF)
				vmware_hypercall_mode =
					vmware_select_hypercall();

			return CPUID_VMWARE_INFO_LEAF;
		}
	} else if (dmi_available && dmi_name_in_serial("VMware") &&
		   __vmware_platform())
		return 1;

	return 0;
}
/*
 * NOTE(review): the text below is unrelated German web-page boilerplate
 * ("information provided without warranty; syntax highlighting and
 * measurement are experimental") that leaked into this file during
 * extraction.  It is not part of the source; kept commented out only so
 * the file remains syntactically coherent -- remove when reconciling.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit
 * gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */