/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
 * but during boot it is switched to point to xen_vcpu_info.
 * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
 * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
 * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
 * which matches the cache line size of 64-bit x86 processors).
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
/*
 * NB: These need to live in .data or alike because they're used by
 * xen_prepare_pvh() which runs before clearing the bss.
 */
enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

uint32_t __ro_after_init xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/* * Evaluate processor vendor in order to select the correct hypercall * function for HVM/PVH guests. * Might be called very early in boot before vendor has been set by * early_cpu_init().
*/
noinstr void *__xen_hypercall_setfunc(void)
{ void (*func)(void);
/* * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty * dependency chain: it is being called via the xen_hypercall static * call when running as a PVH or HVM guest. Hypercalls need to be * noinstr due to PV guests using hypercalls in noinstr code. So we * can safely tag the function body as "instrumentation ok", since * the PV guest requirement is not of interest here (xen_get_vendor() * calls noinstr functions, and static_call_update_early() might do * so, too).
*/
instrumentation_begin();
/*
 * Re-establish this CPU's vcpu_info placement after it may have gone
 * stale (boot-CPU setup, resume, or CPU hotplug paths).
 */
static void xen_vcpu_setup_restore(int cpu)
{
	/* Any per_cpu(xen_vcpu) is stale, so reset it */
	xen_vcpu_info_reset(cpu);

	/*
	 * For PVH and PVHVM, setup online VCPUs only. The rest will
	 * be handled by hotplug.
	 */
	if (xen_pv_domain() ||
	    (xen_hvm_domain() && cpu_online(cpu)))
		xen_vcpu_setup(cpu);
}
/* * On restore, set the vcpu placement up again. * If it fails, then we're in a bad state, since * we can't back out from using it...
*/ void xen_vcpu_restore(void)
{ int cpu;
/*
 * Point this CPU's xen_vcpu pointer back at the default location: the
 * vcpu_info slot in the shared_info page for its Xen vCPU id. vCPU ids
 * beyond the legacy shared_info array get NULL instead.
 */
void xen_vcpu_info_reset(int cpu)
{
	if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
	} else {
		/* Set to NULL so that if somebody accesses it we get an OOPS */
		per_cpu(xen_vcpu, cpu) = NULL;
	}
}
/* * This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu) * and at restore (xen_vcpu_restore). Also called for hotplugged * VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm). * However, the hypercall can only be done once (see below) so if a VCPU * is offlined and comes back online then let's not redo the hypercall. * * For PV it is called during restore (xen_vcpu_restore) and bootup * (xen_setup_vcpu_info_placement). The hotplug mechanism does not * use this function.
*/ if (xen_hvm_domain()) { if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) return;
}
/* * N.B. This hypercall can _only_ be called once per CPU. * Subsequent calls will error out with -EINVAL. This is due to * the fact that hypervisor has no unregister variant and this * hypercall does not allow to over-write info.mfn and * info.offset.
*/
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
&info); if (err)
panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err);
pr_info("Booting kernel on %s\n", pv_info.name);
pr_info("Xen version: %u.%u%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
? " (preserve-AD)" : "");
}
/* Check if running on Xen version (major, minor) or later */ bool xen_running_on_version_or_later(unsignedint major, unsignedint minor)
{ unsignedint version;
staticint
xen_panic_event(struct notifier_block *this, unsignedlong event, void *ptr)
{ if (!kexec_crash_loaded()) { if (xen_legacy_crash)
xen_reboot(SHUTDOWN_crash);
reboot_reason = SHUTDOWN_crash;
/* * If panic_timeout==0 then we are supposed to wait forever. * However, to preserve original dom0 behavior we have to drop * into hypervisor. (domU behavior is controlled by its * config file)
*/ if (panic_timeout == 0)
panic_timeout = -1;
} return NOTIFY_DONE;
}
pin_override.pcpu = cpu;
ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);
/* Ignore errors when removing override. */ if (cpu < 0) return;
switch (ret) { case -ENOSYS:
pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
cpu);
disable_pinning = true; break; case -EPERM:
WARN(1, "Trying to pin vcpu without having privilege to do so\n");
disable_pinning = true; break; case -EINVAL: case -EBUSY:
pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
cpu); break; case 0: break; default:
WARN(1, "rc %d while trying to pin vcpu\n", ret);
disable_pinning = true;
}
}
/* * No need to check for zero size, should happen rarely and will only * write a new entry regarded to be unused due to zero size.
*/ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { /* Add new region. */ if (xen_extra_mem[i].n_pfns == 0) {
xen_extra_mem[i].start_pfn = start_pfn;
xen_extra_mem[i].n_pfns = n_pfns; break;
} /* Append to existing region. */ if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
start_pfn) {
xen_extra_mem[i].n_pfns += n_pfns; break;
}
} if (i == XEN_EXTRA_MEM_MAX_REGIONS)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
 * Seed the unpopulated-memory allocator from the extra memory regions
 * and account those pages as released so the balloon driver computes a
 * correct initial memory target.
 */
int __init arch_xen_unpopulated_init(struct resource **res)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	/* Must be set strictly before calling xen_free_unpopulated_pages(). */
	*res = &iomem_resource;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/*
		 * NOTE(review): 'j' is declared but never used in the visible
		 * code — the inner loop that consumed it looks lost in
		 * extraction; confirm against the upstream file before
		 * removing it.
		 */
		unsigned int j;

		/*
		 * Account for the region being in the physmap but unpopulated.
		 * The value in xen_released_pages is used by the balloon
		 * driver to know how much of the physmap is unpopulated and
		 * set an accurate initial memory target.
		 */
		xen_released_pages += xen_extra_mem[i].n_pfns;
		/* Zero so region is not also added to the balloon driver. */
		xen_extra_mem[i].n_pfns = 0;
	}

	return 0;
}
#endif
/*
 * NOTE(review): the lines that followed here ("Messung V0.5", a
 * processing-time footer, and a German-language website disclaimer) are
 * boilerplate left behind by the tool that extracted this source from a
 * web page. They are not part of this file and should be removed.
 */