/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */
/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changes to 0.2 on 2009/05/14
 * Changes to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */
/*
 * Capabilities advertised by the guest to the host.
 *
 * balloon:           guest supports balloon up/down operations.
 * hot_add:           guest supports memory hot-add requests.
 * hot_add_alignment: to support guests that may have alignment
 *                    limitations on hot-add, the guest can specify its
 *                    alignment requirements; a value of n represents an
 *                    alignment of 2^n in megabytes.
 * reservedz:         reserved; must be zero.
 */
union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
/* A contiguous range of guest page frames, as exchanged with the host. */
union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/* The number of pages in the range. */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;
/* * The header for all dynamic memory messages: * * type: Type of the message. * size: Size of the message in bytes; including the header. * trans_id: The guest is responsible for manufacturing this ID.
*/
/* * Specific message types supporting the dynamic memory protocol.
*/
/* * Version negotiation message. Sent from the guest to the host. * The guest is free to try different versions until the host * accepts the version. * * dm_version: The protocol version requested. * is_last_attempt: If TRUE, this is the last version guest will request. * reservedz: Reserved field, set to zero.
*/
/* * Version response message; Host to Guest and indicates * if the host has accepted the version sent by the guest. * * is_accepted: If TRUE, host has accepted the version and the guest * should proceed to the next stage of the protocol. FALSE indicates that * guest should re-try with a different version. * * reservedz: Reserved field, set to zero.
*/
/* * Response to the capabilities message. This is sent from the host to the * guest. This message notifies if the host has accepted the guest's * capabilities. If the host has not accepted, the guest must shutdown * the service. * * is_accepted: Indicates if the host has accepted guest's capabilities. * reservedz: Must be 0.
*/
/* * This message is used to report memory pressure from the guest. * This message is not part of any transaction and there is no * response to this message. * * num_avail: Available memory in pages. * num_committed: Committed memory in pages. * page_file_size: The accumulated size of all page files * in the system in pages. * zero_free: The number of zero and free pages. * page_file_writes: The writes to the page file in pages. * io_diff: An indicator of file cache efficiency or page file activity, * calculated as File Cache Page Fault Count - Page Read Count. * This value is in pages. * * Some of these metrics are Windows specific and fortunately * the algorithm on the host side that computes the guest memory * pressure only uses num_committed value.
*/
/* * Message to ask the guest to allocate memory - balloon up message. * This message is sent from the host to the guest. The guest may not be * able to allocate as much memory as requested. * * num_pages: number of pages to allocate.
*/
/* * Balloon response message; this message is sent from the guest * to the host in response to the balloon message. * * reservedz: Reserved; must be set to zero. * more_pages: If FALSE, this is the last message of the transaction. * if TRUE there will be at least one more message from the guest. * * range_count: The number of ranges in the range array. * * range_array: An array of page ranges returned to the host. *
*/
/* * Un-balloon message; this message is sent from the host * to the guest to give guest more memory. * * more_pages: If FALSE, this is the last message of the transaction. * if TRUE there will be at least one more message from the guest. * * reservedz: Reserved; must be set to zero. * * range_count: The number of ranges in the range array. * * range_array: An array of page ranges returned to the host. *
*/
/* * Hot add response message. * This message is sent by the guest to report the status of a hot add request. * If page_count is less than the requested page count, then the host should * assume all further hot add requests will fail, since this indicates that * the guest has hit an upper physical memory barrier. * * Hot adds may also fail due to low resources; in this case, the guest must * not complete this message until the hot add can succeed, and the host must * not send a new hot add request until the response is sent. * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS * times it fails the request. * * * page_count: number of pages that were successfully hot added. * * result: result of the operation 1: success, 0: failure. *
*/
/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */
/* * State to manage hot adding memory into the guest. * The range start_pfn : end_pfn specifies the range * that the host has asked us to hot add. The range * start_pfn : ha_end_pfn specifies the range that we have * currently hot added. We hot add in chunks equal to the * memory block size; it is possible that we may not be able * to bring online all the pages in the region. The range * covered_start_pfn:covered_end_pfn defines the pages that can * be brought online.
*/
/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in chunks equal to the
 * memory block size; it is possible that we may not be able
 * to bring online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 *
 * Fix: the extraction-mangled "unsignedlong" tokens are restored to the
 * valid C type "unsigned long".
 */
struct hv_hotadd_state {
	struct list_head list;
	/* First PFN of the host-requested hot-add range. */
	unsigned long start_pfn;
	/* Pages in [covered_start_pfn, covered_end_pfn) can be onlined. */
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	/* End of the range hot-added so far (grows in chunk-sized steps). */
	unsigned long ha_end_pfn;
	/* End of the host-requested range. */
	unsigned long end_pfn;
	/*
	 * A list of gaps (PFN sub-ranges inside the covered range that must
	 * not be onlined).
	 */
	struct list_head gap_list;
};
/*
 * Member declarations of the per-device dynamic-memory state (the enclosing
 * struct declaration is outside this view).
 *
 * Fix: the extraction-mangled "unsignedint" tokens are restored to the
 * valid C type "unsigned int".
 */
/* Number of pages we have currently ballooned out. */
unsigned int num_pages_ballooned;
unsigned int num_pages_onlined;
unsigned int num_pages_added;

/* State to manage the ballooning (up) operation. */
struct balloon_state balloon_wrk;

/* State to execute the "hot-add" operation. */
struct hot_add_wrk ha_wrk;

/*
 * This state tracks if the host has specified a hot-add
 * region.
 */
bool host_specified_ha_region;

/* State to synchronize hot-add. */
struct completion ol_waitevent;

/*
 * This thread handles hot-add requests from the host as well as
 * notifying the host with regards to memory pressure in the guest.
 */
struct task_struct *thread;

/*
 * Protects ha_region_list, num_pages_onlined counter and individual
 * regions from ha_region_list.
 */
spinlock_t ha_lock;

/* A list of hot-add regions. */
struct list_head ha_region_list;

/*
 * We start with the highest version we can support
 * and downgrade based on the host; we save here the
 * next version to try.
 */
__u32 next_version;

/* The negotiated version agreed by host. */
__u32 version;

/* Hook into the kernel's free-page reporting machinery. */
struct page_reporting_dev_info pr_dev_info;

/* Maximum number of pages that can be hot_add-ed. */
__u64 max_dynamic_page_count;
};
/* Fragment: counts backed PFNs in [start_pfn, start_pfn + nr_pages). */
while (pfn < start_pfn + nr_pages) {
	/*
	 * Search for a HAS which covers the pfn and, when we find one,
	 * count how many consecutive PFNs are covered.
	 */
	found = false;
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		while ((pfn >= has->start_pfn) &&
		       (pfn < has->end_pfn) &&
		       (pfn < start_pfn + nr_pages)) {
			found = true;
			if (has_pfn_is_backed(has, pfn))
				count++;
			pfn++;
		}
	}

	/*
	 * This PFN is not in any HAS (e.g. we're offlining a region
	 * which was present at boot), no need to account for it. Go
	 * to the next one.
	 */
	if (!found)
		pfn++;
}
/* Fragment: memory-hotplug notifier dispatch (enclosing function truncated). */
switch (val) {
case MEM_ONLINE:
case MEM_CANCEL_ONLINE:
	/* Signal the waiter that the online attempt has concluded. */
	complete(&dm_device.ol_waitevent);
	break;

case MEM_OFFLINE:
	scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
		pfn_count = hv_page_offline_check(mem->start_pfn,
						  mem->nr_pages);
		if (pfn_count <= dm_device.num_pages_onlined) {
			dm_device.num_pages_onlined -= pfn_count;
		} else {
			/*
			 * We're offlining more pages than we
			 * managed to online. This is
			 * unexpected. In any case don't let
			 * num_pages_onlined wrap around zero.
			 */
			WARN_ON_ONCE(1);
			dm_device.num_pages_onlined = 0;
		}
	}
	break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_OFFLINE:
	break;
}
return NOTIFY_OK;
}
/*
 * Check if the particular page is backed and can be onlined and online it.
 * NOTE(review): the tail of this function is missing from this chunk, and
 * "staticvoid"/"elseif" are extraction artifacts — confirm against the
 * original source.
 */
staticvoid hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
		/* Not backed: make sure the page is marked offline. */
		if (!PageOffline(pg))
			__SetPageOffline(pg);
		return;
	} elseif (!PageOffline(pg))
		return;

	/* This frame is currently backed; online the page. */
	generic_online_page(pg, 0);
/* Fragment: interior of the chunk-wise hot-add loop (prologue truncated). */
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
		 HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);

if (ret) {
	pr_err("hot_add memory failed error is %d\n", ret);
	if (ret == -EEXIST) {
		/*
		 * This error indicates that the error
		 * is not a transient failure. This is the
		 * case where the guest's physical address map
		 * precludes hot adding memory. Stop all further
		 * memory hot-add.
		 */
		do_hot_add = false;
	}
	/* Roll back the per-region bookkeeping for the failed chunk. */
	scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
		has->ha_end_pfn -= ha_pages_in_chunk;
		has->covered_end_pfn -= processed_pfn;
	}
	break;
}

/*
 * Wait for memory to get onlined. If the kernel onlined the
 * memory when adding it, this will return directly. Otherwise,
 * it will wait for user space to online the memory. This helps
 * to avoid adding memory faster than it is getting onlined. As
 * adding succeeded, it is ok to proceed even if the memory was
 * not onlined in time.
 */
wait_for_completion_timeout(&dm_device.ol_waitevent, secs_to_jiffies(5));
post_status(&dm_device);
}
}
/*
 * Check whether a PFN range is covered by an existing hot-add region,
 * extending the region and recording gaps as needed.
 * NOTE(review): lines appear to be missing after the kzalloc() below (the
 * gap initialization and list insertion), and the function tail is outside
 * this chunk; "staticint"/"unsignedlong" are extraction artifacts.
 */
staticint pfn_covered(unsignedlong start_pfn, unsignedlong pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsignedlong residual;
	int ret = 0;

	guard(spinlock_irqsave)(&dm_device.ha_lock);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

		/*
		 * If the current hot add-request extends beyond
		 * our current limit; extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			/* Extend the region by multiples of ha_pages_in_chunk */
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
		}
/* Fragment: interior of the page-range hot-add handler (prologue truncated). */
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
	 pg_start);

spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
	/*
	 * If the pfn range we are dealing with is not in the current
	 * "hot add block", move on.
	 */
	if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
		continue;

	old_covered_state = has->covered_end_pfn;

	if (start_pfn < has->ha_end_pfn) {
		/*
		 * This is the case where we are backing pages
		 * in an already hot added region. Bring
		 * these pages online first.
		 */
		pgs_ol = has->ha_end_pfn - start_pfn;
		if (pgs_ol > pfn_cnt)
			pgs_ol = pfn_cnt;

		has->covered_end_pfn += pgs_ol;
		pfn_cnt -= pgs_ol;
		/*
		 * Check if the corresponding memory block is already
		 * online. It is possible to observe struct pages still
		 * being uninitialized here so check section instead.
		 * In case the section is online we need to bring the
		 * rest of pfns (which were not backed previously)
		 * online too.
		 */
		if (start_pfn > has->start_pfn &&
		    online_section_nr(pfn_to_section_nr(start_pfn)))
			hv_bring_pgs_online(has, start_pfn, pgs_ol);
	}

	if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
		/*
		 * We have some residual hot add range
		 * that needs to be hot added; hot add
		 * it now. Hot add a multiple of
		 * ha_pages_in_chunk that fully covers the pages
		 * we have.
		 */
		size = (has->end_pfn - has->ha_end_pfn);
		if (pfn_cnt <= size) {
			size = ALIGN(pfn_cnt, ha_pages_in_chunk);
		} else {
			pfn_cnt = size;
		}
		/* Drop the lock around the (sleeping) hot-add itself. */
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
		spin_lock_irqsave(&dm_device.ha_lock, flags);
	}
	/*
	 * If we managed to online any pages that were given to us,
	 * we declare success.
	 */
	res = has->covered_end_pfn - old_covered_state;
	break;
}
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
/* Fragment: interior of the hot-add request handler (prologue truncated). */
if (rg_start == 0 && !dm->host_specified_ha_region) {
	/*
	 * The host has not specified the hot-add region.
	 * Based on the hot-add page range being specified,
	 * compute a hot-add region that can cover the pages
	 * that need to be hot-added while ensuring the alignment
	 * and size requirements of Linux as it relates to hot-add.
	 */
	rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
	rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
}

if (do_hot_add)
	resp.page_count = process_hot_add(pg_start, pfn_cnt,
					  rg_start, rg_sz);

dm->num_pages_added += resp.page_count;
#endif
/*
 * The result field of the response structure has the
 * following semantics:
 *
 * 1. If all or some pages hot-added: Guest should return success.
 *
 * 2. If no pages could be hot-added:
 *
 * If the guest returns success, then the host
 * will not attempt any further hot-add operations. This
 * signifies a permanent failure.
 *
 * If the guest returns failure, then this failure will be
 * treated as a transient failure and the host may retry the
 * hot-add operation after some delay.
 */
if (resp.page_count > 0)
	resp.result = 1;
elseif (!do_hot_add)
	resp.result = 1;
else
	resp.result = 0;

if (!do_hot_add || resp.page_count == 0) {
	if (!allow_hibernation)
		pr_err("Memory hot add failed\n");
	else
		pr_info("Ignore hot-add request!\n");
}
/*
 * Post our status as it relates memory pressure to the
 * host. Host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */
/*
 * The host expects the guest to report free and committed memory.
 * Furthermore, the host expects the pressure information to include
 * the ballooned out pages. For a given amount of memory that we are
 * managing we need to compute a floor below which we should not
 * balloon. Compute this and add it to the pressure report.
 * We also need to report all offline pages (num_pages_added -
 * num_pages_onlined) as committed to the host, otherwise it can try
 * asking us to balloon them out.
 */
num_pages_avail = si_mem_available();
num_pages_committed = get_pages_committed(dm);

/* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;

/*
 * If our transaction ID is no longer current, just don't
 * send the status. This can happen if we were interrupted
 * after we picked our transaction ID.
 */
if (status.hdr.trans_id != atomic_read(&trans_id))
	return;

/*
 * If the last post time that we sampled has changed,
 * we have raced, don't post the status.
 */
if (last_post != last_post_time)
	return;
/*
 * Return a range of previously ballooned-out pages to the guest:
 * clear the offline marker on each page, free it back to the page
 * allocator, and update the balloon accounting.
 *
 * @dm:          per-device dynamic-memory state (balloon counters).
 * @range_array: host-provided PFN range (start_page, page_cnt).
 *
 * Fix: the extraction-mangled "staticvoid" token is restored to
 * "static void".
 */
static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__ClearPageOffline(pg);
		__free_page(pg);
		dm->num_pages_ballooned--;
		mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, -1);
		/* The page is usable again; add it back to the managed count. */
		adjust_managed_page_count(pg, 1);
	}
}
/*
 * Allocate pages for a balloon-up response, in units of alloc_unit pages.
 * NOTE(review): this function is truncated in this chunk, and
 * "staticunsignedint"/"unsignedint" are extraction artifacts — confirm
 * against the original source.
 */
staticunsignedint alloc_balloon_pages(struct hv_dynmem_device *dm,
				      unsignedint num_pages,
				      struct dm_balloon_response *bl_resp,
				      int alloc_unit)
{
	unsignedint i, j;
	struct page *pg;

	for (i = 0; i < num_pages / alloc_unit; i++) {
		/* Stop once another range would overflow the response packet. */
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
		    HV_HYP_PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));
/*
 * Work handler for the balloon-up request from the host.
 * NOTE(review): only the prologue is visible in this chunk; "staticvoid",
 * "unsignedint" and "unsignedlong" are extraction artifacts.
 */
staticvoid balloon_up(struct work_struct *dummy)
{
	unsignedint num_pages = dm_device.balloon_wrk.num_pages;
	unsignedint num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	long avail_pages;
	unsignedlong floor;

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
	 */
	alloc_unit = PAGES_IN_2M;
/* Fragment: main loop of the dynamic-memory kernel thread. */
while (!kthread_should_stop()) {
	wait_for_completion_interruptible_timeout(&dm_device.config_event,
						  secs_to_jiffies(1));
	/*
	 * The host expects us to post information on the memory
	 * pressure every second.
	 */
	reinit_completion(&dm_device.config_event);
	post_status(dm);
	/*
	 * disable free page reporting if multiple hypercall
	 * failure flag set. It is not done in the page_reporting
	 * callback context as that causes a deadlock between
	 * page_reporting_process() and page_reporting_unregister()
	 */
	if (hv_hypercall_multi_failure >= HV_MAX_FAILURES) {
		pr_err("Multiple failures in cold memory discard hypercall, disabling page reporting\n");
		disable_page_reporting();
		/* Reset the flag after disabling reporting */
		hv_hypercall_multi_failure = 0;
	}
}
/* Fragment: handling of the host's version-negotiation response. */
if (vresp->is_accepted) {
	/*
	 * We are done; wakeup the
	 * context waiting for version
	 * negotiation.
	 */
	complete(&dm->host_event);
	return;
}
/*
 * If there are more versions to try, continue
 * with negotiations; if not
 * shutdown the service since we are not able
 * to negotiate a suitable version number
 * with the host.
 */
if (dm->next_version == 0)
	goto version_error;

/*
 * Set the next version to try in case current version fails.
 * Win7 protocol ought to be the last one to try.
 */
switch (version_req.version.version) {
case DYNMEM_PROTOCOL_VERSION_WIN8:
	dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 0;
	break;
default:
	dm->next_version = 0;
	version_req.is_last_attempt = 1;
}

ret = vmbus_sendpacket(dm->dev->channel, &version_req,
		       sizeof(struct dm_version_request),
		       (unsignedlong)NULL,
		       VM_PKT_DATA_INBAND, 0);
/*
 * Fragment: cold-memory-discard hint reporting (prologue and part of the
 * range-building loop are missing from this chunk).
 */
range = &hint->ranges[i];
range->address_space = 0;
order = get_order(sg->length);
/*
 * Hyper-V expects the additional_pages field in the units
 * of one of these 3 sizes, 4Kbytes, 2Mbytes or 1Gbytes.
 * This is dictated by the values of the fields page.largesize
 * and page_size.
 * This code however, only uses 4Kbytes and 2Mbytes units
 * and not 1Gbytes unit.
 */
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
			     hint, NULL);
local_irq_restore(flags);
if (!hv_result_success(status)) {
	pr_err("Cold memory discard hypercall failed with status %llx\n",
	       status);
	/* Count repeated failures so the worker thread can disable reporting. */
	if (hv_hypercall_multi_failure > 0)
		hv_hypercall_multi_failure++;

	if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
		pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
		pr_err("Defaulting to page_reporting_order %d\n",
		       pageblock_order);
		page_reporting_order = pageblock_order;
		hv_hypercall_multi_failure++;
		return -EINVAL;
	}

	return -EINVAL;
}

return 0;
}
/*
 * Register this driver's free-page-report callback with the kernel's page
 * reporting machinery so cold-memory-discard hints can be sent to Hyper-V.
 * Silently does nothing if the hypervisor does not advertise the cold
 * discard capability.
 *
 * Fix: the extraction-mangled "staticvoid" token is restored to
 * "static void".
 */
static void enable_page_reporting(void)
{
	int ret;

	if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
		pr_debug("Cold memory discard hint not supported by Hyper-V\n");
		return;
	}

	BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
	dm_device.pr_dev_info.report = hv_free_page_report;
	/*
	 * We let the page_reporting_order parameter decide the order
	 * in the page_reporting code.
	 */
	dm_device.pr_dev_info.order = 0;
	ret = page_reporting_register(&dm_device.pr_dev_info);
	if (ret < 0) {
		/* Clear the callback so a later disable is a no-op. */
		dm_device.pr_dev_info.report = NULL;
		pr_err("Failed to enable cold memory discard: %d\n", ret);
	} else {
		pr_info("Cold memory discard hint enabled with order %d\n",
			page_reporting_order);
	}
}
/*
 * Return 1 if the balloon capability should be advertised to the host,
 * 0 otherwise.
 *
 * Fix: the extraction-mangled "staticint" token is restored to
 * "static int".
 */
static int ballooning_enabled(void)
{
	/*
	 * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
	 * since currently it's unclear to us whether an unballoon request can
	 * make sure all page ranges are guest page size aligned.
	 */
	if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
		pr_info("Ballooning disabled because page size is not 4096 bytes\n");
		return 0;
	}

	return 1;
}
/*
 * Return whether memory hot-add should be advertised to the host.
 * NOTE(review): the function tail (presumably "return 1;" and the closing
 * brace) is missing from this chunk; "staticint" is an extraction artifact.
 */
staticint hot_add_enabled(void)
{
	/*
	 * Disable hot add on ARM64, because we currently rely on
	 * memory_add_physaddr_to_nid() to get a node id of a hot add range,
	 * however ARM64's memory_add_physaddr_to_nid() always return 0 and
	 * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
	 * add_memory().
	 */
	if (IS_ENABLED(CONFIG_ARM64)) {
		pr_info("Memory hot add disabled on ARM64\n");
		return 0;
	}
/*
 * Fragment: interior of the VSP connection/negotiation sequence
 * (function prologue and epilogue are outside this chunk).
 */
/*
 * max_pkt_size should be large enough for one vmbus packet header plus
 * our receive buffer size. Hyper-V sends messages up to
 * HV_HYP_PAGE_SIZE bytes long on balloon channel.
 */
dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;

ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
		 balloon_onchannelcallback, dev);
if (ret)
	return ret;

/*
 * Initiate the hand shake with the host and negotiate
 * a version that the host can support. We start with the
 * highest version number and go down if the host cannot
 * support it.
 */
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
dm_device.version = version_req.version.version;

ret = vmbus_sendpacket(dev->channel, &version_req,
		       sizeof(struct dm_version_request),
		       (unsignedlong)NULL, VM_PKT_DATA_INBAND, 0);
if (ret)
	goto out;

t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5));
if (t == 0) {
	ret = -ETIMEDOUT;
	goto out;
}

/*
 * If we could not negotiate a compatible version with the host
 * fail the probe function.
 */
if (dm_device.state == DM_INIT_ERROR) {
	ret = -EPROTO;
	goto out;
}

pr_info("Using Dynamic Memory protocol version %u.%u\n",
	DYNMEM_MAJOR_VERSION(dm_device.version),
	DYNMEM_MINOR_VERSION(dm_device.version));

/*
 * Now submit our capabilities to the host.
 */
memset(&cap_msg, 0, sizeof(struct dm_capabilities));
cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
cap_msg.hdr.size = sizeof(struct dm_capabilities);
cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

/*
 * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
 * currently still requires the bits to be set, so we have to add code
 * to fail the host's hot-add and balloon up/down requests, if any.
 */
cap_msg.caps.cap_bits.balloon = ballooning_enabled();
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();

/*
 * Specify our alignment requirements for memory hot-add. The value is
 * the log base 2 of the number of megabytes in a chunk. For example,
 * with 256 MiB chunks, the value is 8. The number of MiB in a chunk
 * must be a power of 2.
 */
cap_msg.caps.cap_bits.hot_add_alignment =
		ilog2(HA_BYTES_IN_CHUNK / SZ_1M);

/*
 * Currently the host does not use these
 * values and we set them to what is done in the
 * Windows driver.
 */
cap_msg.min_page_cnt = 0;
cap_msg.max_page_number = -1;

ret = vmbus_sendpacket(dev->channel, &cap_msg,
		       sizeof(struct dm_capabilities),
		       (unsignedlong)NULL, VM_PKT_DATA_INBAND, 0);
if (ret)
	goto out;

t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5));
if (t == 0) {
	ret = -ETIMEDOUT;
	goto out;
}

/*
 * If the host does not like our capabilities,
 * fail the probe function.
 */
if (dm_device.state == DM_INIT_ERROR) {
	ret = -EPROTO;
	goto out;
}
/**
 * hv_balloon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed in hv-balloon in the debugfs.
 *
 * Return: zero on success or an error code.
 *
 * NOTE(review): only the prologue is visible in this chunk; "staticint" is
 * an extraction artifact.
 */
staticint hv_balloon_debug_show(struct seq_file *f, void *offset)
{
	struct hv_dynmem_device *dm = f->private;
	char *sname;
/*
 * Driver probe: initialize per-device state and kick off host negotiation.
 * NOTE(review): the function continues past this chunk; "staticint" and
 * "conststruct" are extraction artifacts.
 */
staticint balloon_probe(struct hv_device *dev,
			conststruct hv_vmbus_device_id *dev_id)
{
	int ret;

	/* Hibernation support forces hot-add off (see comment below). */
	allow_hibernation = hv_is_hibernation_supported();
	if (allow_hibernation)
		hot_add = false;

#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Hot-add must operate in chunks that are of size equal to the
	 * memory block size because that's what the core add_memory()
	 * interface requires. The Hyper-V interface requires that the memory
	 * block size be a power of 2, which is guaranteed by the check in
	 * memory_dev_init().
	 */
	ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
	do_hot_add = hot_add;
#else
	/*
	 * Without MEMORY_HOTPLUG, the guest returns a failure status for all
	 * hot add requests from Hyper-V, and the chunk size is used only to
	 * specify alignment to Hyper-V as required by the host/guest protocol.
	 * Somewhat arbitrarily, use 128 MiB.
	 */
	ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
	do_hot_add = false;
#endif
	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;
/*
 * Fragment: teardown path (the enclosing function is outside this chunk).
 * This is to handle the case when balloon_resume()
 * call has failed and some cleanup has been done as
 * a part of the error handling.
 */
if (dm_device.state != DM_INIT_ERROR) {
	disable_page_reporting();
	vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
	unregister_memory_notifier(&hv_memory_nb);
	restore_online_page_callback(&hv_online_page);
#endif
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.