/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

/* These are unused until we support booting "pre-ballooned" */
/* Fix: original text had "unsignedlong" (missing space) and crammed both
 * definitions onto the comment line. */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/*
 * NOTE(review): fragment — the enclosing function's signature (presumably
 * something like xen_read_wallclock(struct timespec64 *ts)) is not visible
 * in this chunk; 'version', 'now', 'ts_monotonic', 'ts' and 'wall_clock'
 * are declared outside the visible span.
 *
 * The loop is a lock-free retry read of the shared wall-clock record:
 * re-read until the version is even (an odd version appears to mean an
 * update is in progress) and unchanged across the whole read.
 */
/* get wallclock at system boot */ do {
version = wall_clock->version;
rmb(); /* fetch version before time */
/* 64-bit seconds are split into sec_hi/sec halves in the shared record. */
now.tv_sec = ((uint64_t)wall_clock->sec_hi << 32) | wall_clock->sec;
now.tv_nsec = wall_clock->nsec;
rmb(); /* fetch time before checking version */
} while ((wall_clock->version & 1) || (version != wall_clock->version));
/* time since system boot */
ktime_get_ts64(&ts_monotonic);
/* current wall time = wallclock-at-boot + monotonic time since boot */
*ts = timespec64_add(now, ts_monotonic);
}
/*
 * NOTE(review): fragment of a larger notifier callback not fully visible
 * here — 'was_set', 'now' and 'next_sync' are defined outside this span.
 * Early-out: skip the hypercall unless the clock was explicitly set or the
 * periodic sync deadline has passed.
 */
/* * We only take the expensive HV call when the clock was set * or when the 11 minutes RTC synchronization time elapsed.
*/ if (!was_set && timespec64_compare(&now, &next_sync) < 0) return NOTIFY_OK;
/*
 * NOTE(review): fragment — 'cpu' and the 'after_register_vcpu_info' label
 * belong to an enclosing function that is not visible in this chunk.
 * Skip re-registration: the hypercall may only be issued once per vcpu.
 */
/* * VCPUOP_register_vcpu_info cannot be called twice for the same * vcpu, so if vcpu_info is already registered, just get out. This * can happen with cpu-hotplug.
*/ if (per_cpu(xen_vcpu, cpu) != NULL) goto after_register_vcpu_info;
/*
 * NOTE(review): fragment — this is the interior/tail of a flattened-DT scan
 * callback (likely fdt_find_hyper_node); its signature and the declarations
 * of 'node', 's' and 'len' are outside the visible span.
 */
/* Record that a node compatible with the expected hypervisor was found. */
if (of_flat_dt_is_compatible(node, hyper_node.compat))
hyper_node.found = true;
/* Extract the version suffix that follows the known compatible prefix.
 * The "+ 3" bound presumably leaves room for at least "x.y" plus the NUL
 * terminator — TODO confirm against the prefix definition. */
s = of_get_flat_dt_prop(node, "compatible", &len); if (strlen(hyper_node.prefix) + 3 < len &&
!strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
hyper_node.version = s + strlen(hyper_node.prefix);
/* * Check if Xen supports EFI by checking whether there is the * "/hypervisor/uefi" node in DT. If so, runtime services are available * through proxy functions (e.g. in case of Xen dom0 EFI implementation * they call special hypercall which executes relevant EFI functions) * and that is why they are always enabled.
*/ if (IS_ENABLED(CONFIG_XEN_EFI)) { if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
!efi_runtime_disabled())
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
/* Returning 0 continues the of_scan_flat_dt() walk over remaining nodes. */
return 0;
}
/*
 * Early Xen detection from the flattened device tree.
 *
 * See Documentation/devicetree/bindings/arm/xen.txt for the documentation
 * of the Xen Device Tree format.
 */
void __init xen_early_init(void)
{
	of_scan_flat_dt(fdt_find_hyper_node, NULL);

	/* Nothing to do unless the scan located a hypervisor node. */
	if (!hyper_node.found) {
		pr_debug("No Xen support\n");
		return;
	}

	/* A node without a parsable version string is treated as no Xen. */
	if (hyper_node.version == NULL) {
		pr_debug("Xen version not found\n");
		return;
	}

	pr_info("Xen %s support found\n", hyper_node.version);

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	/* The initial (control) domain gets the privileged start flags. */
	if (xen_feature(XENFEAT_dom0))
		xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;

	/* DomU guests default to the Xen hvc console unless one was chosen
	 * on the command line. */
	if (!console_set_on_cmdline && !xen_initial_domain())
		add_preferred_console("hvc", 0, NULL);
}
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
 * A type-less specific Xen resource which contains extended regions
 * (unused regions of guest physical address space provided by the
 * hypervisor).
 */
/* Fix: original text had "staticstruct" (missing space) with the comment
 * fused onto the definition line. */
static struct resource xen_resource = {
	.name = "Xen unused space",
};
/*
 * Discover the hypervisor-provided extended regions from the DT and build a
 * resource describing the unused guest-physical address space.
 *
 * NOTE(review): this function is truncated in the visible chunk — its body is
 * cut off after the hole-marking loop below, and the visible span never
 * allocates 'regs' before writing into it; presumably an allocation (e.g.
 * kcalloc of nr_reg entries) was lost in extraction — TODO confirm against
 * the original file.
 */
int __init arch_xen_unpopulated_init(struct resource **res)
{ struct device_node *np; struct resource *regs, *tmp_res;
/* NOTE(review): "unsignedint" below is an extraction artifact for
 * "unsigned int" (missing space). */
uint64_t min_gpaddr = -1, max_gpaddr = 0; unsignedint i, nr_reg = 0; int rc;
/* Only meaningful when running as a Xen guest booted via DT (not ACPI). */
if (!xen_domain()) return -ENODEV;
if (!acpi_disabled) return -ENODEV;
np = of_find_compatible_node(NULL, NULL, "xen,xen"); if (WARN_ON(!np)) return -ENODEV;
/* Count the extended regions listed after the grant-table region. */
/* Skip region 0 which is reserved for grant table space */ while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
nr_reg++;
if (!nr_reg) {
pr_err("No extended regions are found\n");
of_node_put(np); return -EINVAL;
}
/* * Create resource from extended regions provided by the hypervisor to be * used as unused address space for Xen scratch pages.
*/ for (i = 0; i < nr_reg; i++) {
/* NOTE(review): "®s[i]" below is a mis-encoded "&regs[i]" (the "&re"
 * sequence was corrupted into the (R) character during extraction). */
rc = of_address_to_resource(np, i + EXT_REGION_INDEX, ®s[i]); if (rc) goto err;
/* Track the overall [min_gpaddr, max_gpaddr] span across all regions. */
if (max_gpaddr < regs[i].end)
max_gpaddr = regs[i].end; if (min_gpaddr > regs[i].start)
min_gpaddr = regs[i].start;
}
/* * Mark holes between extended regions as unavailable. The rest of that * address space will be available for the allocation.
*/ for (i = 1; i < nr_reg; i++) {
resource_size_t start, end;
/* Regions are expected in ascending order; overlap is a hard error. */
/* There is an overlap between regions */ if (regs[i - 1].end + 1 > regs[i].start) {
rc = -EINVAL; goto err;
}
/* Adjacent regions leave no hole to mark. */
/* There is no hole between regions */ if (regs[i - 1].end + 1 == regs[i].start) continue;
/*
 * NOTE(review): fragment — this looks like the tail of a DT guest-init
 * routine (a void function: it uses bare 'return'); 'xen_node' and 'res'
 * are declared outside the visible span, and the lines above this point
 * belong to a different, truncated function.
 */
/* Re-look-up the node found during early init; vanishing is unexpected. */
xen_node = of_find_compatible_node(NULL, NULL, "xen,xen"); if (!xen_node) {
pr_err("Xen support was detected before, but it has disappeared\n");  return;
}
/* Read the grant-table region (reg entry GRANT_TABLE_INDEX) from the node. */
if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
pr_err("Xen grant table region is not found\n");
of_node_put(xen_node); return;
}
of_node_put(xen_node);
/* Record the guest-physical base address of the grant-table space. */
xen_grant_frames = res.start;
}
/*
 * NOTE(review): fragment — interior of a larger guest-init function whose
 * signature and surrounding code are not visible; 'cpu' and the int return
 * type are implied by the statements below but declared elsewhere.
 */
/* Route virtio memory-access restrictions through the Xen grant mechanism. */
if (IS_ENABLED(CONFIG_XEN_VIRTIO))
virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
/* Finish platform discovery via ACPI or DT, whichever booted us. */
if (!acpi_disabled)
xen_acpi_guest_init(); else
xen_dt_guest_init();
/* The event-channel upcall IRQ is mandatory for a Xen guest. */
if (!xen_events_irq) {
pr_err("Xen event channel interrupt not found\n"); return -ENODEV;
}
/* * The fdt parsing codes have set EFI_RUNTIME_SERVICES if Xen EFI * parameters are found. Force enable runtime services.
*/ if (efi_enabled(EFI_RUNTIME_SERVICES))
xen_efi_runtime_setup();
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info * page, we use it in the event channel upcall and in some pvclock * related functions. * The shared info contains exactly 1 CPU (the boot CPU). The guest * is required to use VCPUOP_register_vcpu_info to place vcpu info * for secondary CPUs as they are brought up. * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
/* Per-CPU vcpu_info storage, aligned up to the next power of two of its
 * size (1 << fls(size - 1)). */
xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
1 << fls(sizeof(struct vcpu_info) - 1)); if (xen_vcpu_info == NULL) return -ENOMEM;
/* Direct vCPU id mapping for ARM guests. */
for_each_possible_cpu(cpu)
per_cpu(xen_vcpu_id, cpu) = cpu;
/*
 * NOTE(review): the German text below is residue from the web page this file
 * was extracted from — it is a hosting-site disclaimer, not source code.
 * Fenced inside a comment so the file remains parseable. Translation:
 * "The information on this website was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the information provided is guaranteed. Remark: the syntax highlighting
 * and the measurement are still experimental."
 */