/*
 * Per cpu state for channel handling
 */
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;

	/*
	 * The page is only used in hv_post_message() for a TDX VM (with the
	 * paravisor) to post messages to Hyper-V: when such a VM calls
	 * HVCALL_POST_MESSAGE, it can't use the hyperv_pcpu_input_arg (which
	 * is encrypted in such a VM) as the hypercall input page, because
	 * the input page for HVCALL_POST_MESSAGE must be decrypted in such a
	 * VM, so post_msg_page (which is decrypted in hv_synic_alloc()) is
	 * introduced for this purpose. See hyperv_init() for more comments.
	 */
	void *post_msg_page;

	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we will manage the tasklet that handles events messages on a per CPU
	 * basis.
	 */
	struct tasklet_struct msg_dpc;
};
struct hv_context {
	/*
	 * We only support running on top of Hyper-V, so at this point
	 * this really can only contain the Hyper-V ID.
	 */
	u64 guestid;

	/* Per-CPU channel-handling state (see struct hv_per_cpu_context). */
	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by numa node ID.
	 */
	struct cpumask *hv_numa_map;
};
/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE. Half of HV_HYP_PAGE_SIZE is
 * used to send endpoint interrupts, and the other half is used to receive
 * endpoint interrupts: one channel per bit, i.e. (HV_HYP_PAGE_SIZE / 2) * 8.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)

/* The value here must be in multiple of 32 */
#define MAX_NUM_CHANNELS_SUPPORTED	256

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU	0
/* Global state of the VMBus connection between this guest and the host. */
struct vmbus_connection {
	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;

	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel. When a channel sends an interrupt via VMBUS, it finds its
	 * bit in the sendInterruptPage, set it and calls Hv to generate a port
	 * event. The other end receives the port event and parse the
	 * recvInterruptPage to see which bit is set
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * 2 pages - 1st page for parent->child notification and 2nd
	 * is child->parent notification
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/* Array of channels */
	struct vmbus_channel **channels;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;
	struct workqueue_struct *rescind_work_queue;

	/*
	 * On suspension of the vmbus, the accumulated offer messages
	 * must be dropped.
	 */
	bool ignore_any_offer_msg;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up upon suspend: sub-channels will be re-created upon
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
	 * drop to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * Completed once the host has offered all boot-time channels.
	 * Note that some channels may still be under process on a workqueue.
	 */
	struct completion all_offers_delivered_event;
};
/* KVP (Key-Value Pair) util service: lifecycle, suspend/resume, channel callback. */
int hv_kvp_init(struct hv_util_service *srv);
int hv_kvp_init_transport(void);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

/* VSS (backup/snapshot) util service: lifecycle, suspend/resume, channel callback. */
int hv_vss_init(struct hv_util_service *srv);
int hv_vss_init_transport(void);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);

void vmbus_initiate_unload(bool crash);
/*
 * State machine for a hv_utils device: tracks a host request as it is
 * relayed to the userspace daemon and back.
 */
enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};
lockdep_assert_held(&vmbus_connection.channel_mutex); /* * List additions/deletions as well as updates of the target CPUs are * protected by channel_mutex.
*/
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { if (!hv_is_perf_channel(channel)) continue; if (channel->target_cpu == cpu) returntrue;
list_for_each_entry(sc, &channel->sc_list, sc_list) { if (sc->target_cpu == cpu) returntrue;
}
} returnfalse;
}
/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
int hv_create_ring_sysfs(struct vmbus_channel *channel,
			 int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
						    struct vm_area_struct *vma));
int hv_remove_ring_sysfs(struct vmbus_channel *channel);

#endif /* _HYPERV_VMBUS_H */
Messung V0.5
¤ Dauer der Verarbeitung: 0.16 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.