/*
 * If the architecture is not happy with the system-wide "iomem_resource"
 * being used for region allocation, it can provide its own view by
 * creating a specific Xen resource covering unused regions of the guest
 * physical address space provided by the hypervisor.
 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	/* Default: hand back the global I/O memory resource tree. */
	*res = &iomem_resource;

	return 0;
}
/*
 * fill_list() - grow the pool of unpopulated pages by at least @nr_pages.
 *
 * Rounds the request up to a PAGES_PER_SECTION multiple, carves a region
 * out of @target_resource, invalidates the matching p2m entries (PV MMU
 * only) and remaps the region via memremap_pages().
 *
 * Returns 0 on success, a negative errno on failure.
 *
 * NOTE(review): this excerpt is corrupted/truncated: "staticint" and
 * "unsignedint" are missing spaces, "} #endif" fuses a directive onto a
 * code line, and the function tail (list insertion, labels err_memremap/
 * err_insert/err_resource, closing brace) is cut off after the final for
 * loop below -- compare against the complete upstream file before use.
 */
staticint fill_list(unsignedint nr_pages)
{ struct dev_pagemap *pgmap; struct resource *res, *tmp_res = NULL; void *vaddr; unsignedint i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); struct range mhp_range; int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM;
/*
 * NOTE(review): mhp_range has no visible initialization in this excerpt,
 * yet mhp_range.start/.end are passed below; presumably the full file
 * assigns it (e.g. from a hotplug-range helper) before this call -- verify.
 */
ret = allocate_resource(target_resource, res,
alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); if (ret < 0) {
pr_err("Cannot allocate new IOMEM resource\n"); goto err_resource;
}
/*
 * Reserve the region previously allocated from Xen resource to avoid
 * re-using it by someone else.
 */
if (target_resource != &iomem_resource) {
tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL); if (!tmp_res) {
ret = -ENOMEM; goto err_insert;
}
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
 * memremap will build page tables for the new memory so
 * the p2m must contain invalid entries so the correct
 * non-present PTEs will be written.
 *
 * If a failure occurs, the original (identity) p2m entries
 * are not restored since this region is now known not to
 * conflict with any devices.
 */
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
xen_pfn_t pfn = PFN_DOWN(res->start);
for (i = 0; i < alloc_pages; i++) { if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
pr_warn("set_phys_to_machine() failed, no memory added\n");
ret = -ENOMEM; goto err_memremap;
}
}
} #endif
/*
 * NOTE(review): pgmap is passed uninitialized as far as this excerpt
 * shows; the full file presumably kzalloc()s and populates it (type,
 * range, owner) before this call -- verify against the original.
 */
vaddr = memremap_pages(pgmap, NUMA_NO_NODE); if (IS_ERR(vaddr)) {
pr_err("Cannot remap memory range\n");
ret = PTR_ERR(vaddr); goto err_memremap;
}
/*
 * NOTE(review): the body of this loop and everything after it are
 * missing from this excerpt -- the function is cut off here.
 */
for (i = 0; i < alloc_pages; i++) { struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 *
 * NOTE(review): this excerpt is truncated -- the per-page loop below is
 * cut off mid-body (its error-unwind path, the unlock, and the rest of
 * the function are not visible here); compare against the complete
 * upstream file before relying on this listing.
 */
int xen_alloc_unpopulated_pages(unsignedint nr_pages, struct page **pages)
{ unsignedint i; int ret = 0;
/*
 * Fallback to default behavior if we do not have any suitable resource
 * to allocate required region from and as the result we won't be able to
 * construct pages.
 */
if (!target_resource) return xen_alloc_ballooned_pages(nr_pages, pages);
/* Under list_lock: top up the free-page pool if it cannot satisfy the request. */
mutex_lock(&list_lock); if (list_count < nr_pages) {
ret = fill_list(nr_pages - list_count); if (ret) goto out;
}
/* Pop pages off the head of page_list; PV MMU additionally needs a p2m entry. */
for (i = 0; i < nr_pages; i++) { struct page *pg = page_list;
#ifdef CONFIG_XEN_HAVE_PVMMU if (!xen_feature(XENFEAT_auto_translated_physmap)) {
ret = xen_alloc_p2m_entry(page_to_pfn(pg)); if (ret < 0) { unsignedint j;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.