// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * KVM paravirtual clock driver. A clocksource implementation.
 *
 * Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
 */
/*
 * Number of pvclock slots that fit in one page.
 *
 * Aligned to page sizes to match what's mapped via vsyscalls to userspace.
 * (The comment must be on its own line: a '#' directive is only valid when
 * preceded by a newline, so fusing the comment onto the #define broke it.)
 */
#define HVC_BOOT_ARRAY_SIZE \
	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
/*
 * The wallclock is the time of day when we booted. Since then, some time may
 * have elapsed since the hypervisor wrote the data. So we try to account for
 * that with system time.
 */
static void kvm_get_wallclock(struct timespec64 *now)
{
	/* Ask the hypervisor to (re)fill wall_clock at its physical address. */
	wrmsrq(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));

	/* this_cpu_pvti() must not race with a CPU migration. */
	preempt_disable();
	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
	preempt_enable();
}
/*
 * If we don't do that, there is the possibility that the guest
 * will calibrate under heavy load - thus, getting a lower lpj -
 * and execute the delays themselves without load. This is wrong,
 * because no delay loop can finish beforehand.
 * Any heuristics is subject to fail, because ultimately, a large
 * poll of guests can be running and trouble each other. So we preset
 * lpj here.
 */
static unsigned long kvm_get_tsc_khz(void)
{
	/* Mark the TSC frequency as known so recalibration is skipped. */
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	return pvclock_tsc_khz(this_cpu_pvti());
}
/*
 * NOTE(review): interior fragment of an allocation helper — the enclosing
 * function header was lost in this chunk (presumably kvmclock_init_mem();
 * confirm against the full file). It allocates backing pages for the
 * per-CPU pvclock slots that do not fit in the static boot array.
 */

/* The static boot array already covers every possible CPU: nothing to do. */
if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
	return;

/* Only the CPUs beyond the static array need dynamic slots. */
ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
order = get_order(ncpus * sizeof(*hvclock_mem));

p = alloc_pages(GFP_KERNEL, order);
if (!p) {
	pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
	return;
}

hvclock_mem = page_address(p);

/*
 * hvclock is shared between the guest and the hypervisor, must
 * be mapped decrypted.
 */
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
	r = set_memory_decrypted((unsigned long) hvclock_mem,
				 1UL << order);
	if (r) {
		/* Can't share encrypted pages: free them and disable the area. */
		__free_pages(p, order);
		hvclock_mem = NULL;
		pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
		return;
	}
}
/* * The per cpu area setup replicates CPU0 data to all cpu * pointers. So carefully check. CPU0 has been set up in init * already.
*/ if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0))) return 0;
/* Use the static page for the first CPUs, allocate otherwise */ if (cpu < HVC_BOOT_ARRAY_SIZE)
p = &hv_clock_boot[cpu]; elseif (hvclock_mem)
p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE; else return -ENOMEM;
/*
 * NOTE(review): statement fragment from the driver's init path (the
 * enclosing function is not visible in this chunk — confirm against the
 * full file).
 *
 * X86_FEATURE_NONSTOP_TSC means the TSC runs at a constant rate across
 * P/T states and does not stop in deep C-states.
 *
 * An invariant TSC exposed by the host means kvmclock is not necessary:
 * the guest can use the TSC as its clocksource, so lower kvmclock's
 * rating.
 */
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
    !check_tsc_unstable())
	kvm_clock.rating = 299;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.