// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Microsoft Corporation.
 *
 * Hypercall helper functions used by the mshv_root module.
 *
 * Authors: Microsoft Linux virtualization team
 */
/*
 * NOTE(review): this is the tail of (presumably) hv_call_create_partition();
 * the function head, the local declarations, the hypercall-page setup, and the
 * do { ... } loop opening are missing from the visible text — restore them from
 * the original source before building.
 */
/* Issue the hypercall; retry with a memory deposit on HV_STATUS_INSUFFICIENT_MEMORY. */
status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
input, output);
/* Any status other than "insufficient memory" is final: report it and stop. */
if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) { if (hv_result_success(status))
*partition_id = output->partition_id;
local_irq_restore(irq_flags);
ret = hv_result_to_errno(status); break;
}
local_irq_restore(irq_flags);
/* Deposit one more page into the root partition's pool, then retry. */
ret = hv_call_deposit_pages(NUMA_NO_NODE,
hv_current_partition_id, 1);
} while (!ret);
return ret;
}
/*
 * Initialize a previously created child partition.
 *
 * A deposit of HV_INIT_PARTITION_DEPOSIT_PAGES pages is made up front; if the
 * hypervisor still reports HV_STATUS_INSUFFICIENT_MEMORY, one extra page is
 * deposited per retry until the call succeeds or a deposit fails.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int hv_call_initialize_partition(u64 partition_id)
{
	struct hv_input_initialize_partition input = {
		.partition_id = partition_id,
	};
	u64 status;
	int ret;

	ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
				    HV_INIT_PARTITION_DEPOSIT_PAGES);

	while (!ret) {
		status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
					       *(u64 *)&input);

		/* Anything but "insufficient memory" is a final answer. */
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			ret = hv_result_to_errno(status);
			break;
		}

		ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
	}

	return ret;
}
/*
 * Finalize a partition, signaling the hypervisor that setup is complete.
 *
 * Returns 0 on success, a negative errno derived from the hypervisor status.
 */
int hv_call_finalize_partition(u64 partition_id)
{
	struct hv_input_finalize_partition input = {
		.partition_id = partition_id,
	};
	u64 status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
					   *(u64 *)&input);

	return hv_result_to_errno(status);
}
/*
 * Delete a partition, releasing it on the hypervisor side.
 *
 * Returns 0 on success, a negative errno derived from the hypervisor status.
 */
int hv_call_delete_partition(u64 partition_id)
{
	struct hv_input_delete_partition input = {
		.partition_id = partition_id,
	};

	return hv_result_to_errno(hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION,
							*(u64 *)&input));
}
/* Ask the hypervisor to map guest ram pages or the guest mmio space */ staticint hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
u32 flags, struct page **pages, u64 mmio_spa)
{ struct hv_input_map_gpa_pages *input_page;
u64 status, *pfnlist; unsignedlong irq_flags, large_shift = 0; int ret = 0, done = 0;
u64 page_count = page_struct_count;
/* Reject empty requests and mutually exclusive ram/mmio arguments. */
if (page_count == 0 || (pages && mmio_spa)) return -EINVAL;
/* Large-page mappings require 2M alignment and only apply to ram. */
if (flags & HV_MAP_GPA_LARGE_PAGE) { if (mmio_spa) return -EINVAL;
if (!HV_PAGE_COUNT_2M_ALIGNED(page_count)) return -EINVAL;
/*
 * NOTE(review): text appears to be missing here — the do { } loop opening,
 * local_irq_save(), the hypercall input-page setup (input_page, pfnlist),
 * and the rep_count computation are not present, yet 'i', 'rep_count' and
 * 'pfnlist' are used below. Also 'staticint'/'elseif'/'unsignedlong' above
 * and below are fused keywords. Restore from the original source.
 */
/* Build the PFN repetition list for this batch. */
for (i = 0; i < rep_count; i++) if (flags & HV_MAP_GPA_NO_ACCESS) {
pfnlist[i] = 0;
} elseif (pages) {
u64 index = (done + i) << large_shift;
if (index >= page_struct_count) {
ret = -EINVAL; break;
}
pfnlist[i] = page_to_pfn(pages[index]);
} else {
pfnlist[i] = mmio_spa + done + i;
} if (ret) break;
status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
input_page, NULL);
local_irq_restore(irq_flags);
completed = hv_repcomp(status);
/* On memory pressure, deposit more pages and retry the remainder. */
if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
HV_MAP_GPA_DEPOSIT_PAGES); if (ret) break;
} elseif (!hv_result_success(status)) {
ret = hv_result_to_errno(status); break;
}
/*
 * NOTE(review): this completion/status check duplicates the one just above —
 * likely an artifact of the garbled extraction; verify against the original.
 */
completed = hv_repcomp(status); if (!hv_result_success(status)) {
ret = hv_result_to_errno(status); break;
}
done += completed;
}
return ret;
}
/*
 * NOTE(review): the signature below belongs to hv_call_get_gpa_access_states(),
 * but the visible body issues HVCALL_UNMAP_VP_STATE_PAGE — two functions appear
 * to have been fused by lost intervening text. The rep-hypercall loop that
 * fills 'states' and updates '*written_total' is missing, and 'input' is used
 * without ever being declared or populated here. Restore from the original.
 */
int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn, union hv_gpa_page_access_state_flags state_flags, int *written_total, union hv_gpa_page_access_state *states)
{ struct hv_input_get_gpa_pages_access_state *input_page; union hv_gpa_page_access_state *output_page; int completed = 0; unsignedlong remaining = count; int rep_count, i;
u64 status = 0; unsignedlong flags;
status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);
local_irq_restore(flags);
return hv_result_to_errno(status);
}
/*
 * Clear the pending virtual interrupt for the given partition.
 *
 * Fix: 'status' was declared int, truncating the u64 value returned by
 * hv_do_fast_hypercall8() before handing it to hv_result_to_errno().  Use u64
 * to match the hypercall return type and the sibling helpers in this file.
 *
 * Returns 0 on success, a negative errno derived from the hypervisor status.
 */
int
hv_call_clear_virtual_interrupt(u64 partition_id)
{
	u64 status;

	status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
				       partition_id);

	return hv_result_to_errno(status);
}
/*
 * NOTE(review): text appears to be missing from this function — between the
 * memset() and the status check there is no population of the hypercall input
 * (port ids, port_info, VTLs, proximity domain from 'node') and no actual
 * hv_do_hypercall(HVCALL_CREATE_PORT, ...) invocation, so 'status' is read
 * uninitialized in the visible text and local_irq_restore() is never called.
 * Restore from the original source before building.
 */
int
hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
u64 connection_partition_id, struct hv_port_info *port_info,
u8 port_vtl, u8 min_connection_vtl, int node)
{ struct hv_input_create_port *input; unsignedlong flags; int ret = 0; int status;
do {
local_irq_save(flags);
/* Per-cpu hypercall input page; zero it before filling in arguments. */
input = *this_cpu_ptr(hyperv_pcpu_input_arg);
memset(input, 0, sizeof(*input));
if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
ret = hv_result_to_errno(status); break;
}
/* Deposit one page into the port-owning partition and retry. */
ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);
} while (!ret);
return ret;
}
/*
 * NOTE(review): hv_call_delete_port() below is truncated — only its signature
 * and locals survive; its HVCALL_DELETE_PORT invocation and return are missing,
 * and the text jumps straight into hv_call_connect_port(), whose own do { }
 * loop opening, input setup, and HVCALL_CONNECT_PORT invocation are also
 * missing ('status' and 'flags' are used without being set in the visible
 * text). Restore both functions from the original source.
 */
int
hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
{ union hv_input_delete_port input = { 0 }; int status;
int
hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
u64 connection_partition_id, union hv_connection_id connection_id, struct hv_connection_info *connection_info,
u8 connection_vtl, int node)
{ struct hv_input_connect_port *input; unsignedlong flags; int ret = 0, status;
local_irq_restore(flags); if (hv_result_success(status)) break;
if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
ret = hv_result_to_errno(status); break;
}
/* Deposit one page into the connecting partition and retry. */
ret = hv_call_deposit_pages(NUMA_NO_NODE,
connection_partition_id, 1);
} while (!ret);
return ret;
}
/*
 * NOTE(review): hv_call_disconnect_port() below is truncated after its locals;
 * the text then jumps into fragments of two other functions (a
 * HVCALL_MAP_STATS_PAGE retry loop, then the input setup and rep loop of a
 * modify-SPA-host-access helper). Several identifiers ('output', 'pfn',
 * 'input_page', 'host_access', 'rep_count', 'i', 'done', 'large_shift') are
 * used without visible declarations, and the final loop has no closing text.
 * Restore all three functions from the original source before building.
 */
int
hv_call_disconnect_port(u64 connection_partition_id, union hv_connection_id connection_id)
{ union hv_input_disconnect_port input = { 0 }; int status;
status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
pfn = output->map_location;
local_irq_restore(flags); if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
ret = hv_result_to_errno(status); if (hv_result_success(status)) break; return ret;
}
/* Deposit one page into the root partition and retry. */
ret = hv_call_deposit_pages(NUMA_NO_NODE,
hv_current_partition_id, 1); if (ret) return ret;
} while (!ret);
memset(input_page, 0, sizeof(*input_page));
/* Only set the partition id when making the pages exclusive. */
if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
input_page->partition_id = partition_id;
input_page->flags = flags;
input_page->host_access = host_access;
for (i = 0; i < rep_count; i++) {
u64 index = (done + i) << large_shift;
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.