/* Values parsed from ACPI DSDT */
/*
 * vmbus_irq is the Linux IRQ number and vmbus_interrupt the architectural
 * interrupt ID; both are filled in from the ACPI extended-IRQ resource in
 * vmbus_walk_resources() (used on ARM64; unused on x86/x64).
 */
int vmbus_irq;
int vmbus_interrupt;
/* * The panic notifier below is responsible solely for unloading the * vmbus connection, which is necessary in a panic event. * * Notice an intrincate relation of this notifier with Hyper-V * framebuffer panic notifier exists - we need vmbus connection alive * there in order to succeed, so we need to order both with each other * [see hvfb_on_panic()] - this is done using notifiers' priorities.
*/ staticint hv_panic_vmbus_unload(struct notifier_block *nb, unsignedlong val, void *args)
{
vmbus_initiate_unload(true); return NOTIFY_DONE;
} staticstruct notifier_block hyperv_panic_vmbus_unload_block = {
.notifier_call = hv_panic_vmbus_unload,
.priority = INT_MIN + 1, /* almost the latest one to execute */
};
/* * Device-level attribute_group callback function. Returns the permission for * each attribute, and returns 0 if an attribute is not visible.
*/ static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{ struct device *dev = kobj_to_dev(kobj); conststruct hv_device *hv_dev = device_to_hv_device(dev);
/* Hide the monitor attributes if the monitor mechanism is not used. */ if (!hv_dev->channel->offermsg.monitor_allocated &&
(attr == &dev_attr_monitor_id.attr ||
attr == &dev_attr_server_monitor_pending.attr ||
attr == &dev_attr_client_monitor_pending.attr ||
attr == &dev_attr_server_monitor_latency.attr ||
attr == &dev_attr_client_monitor_latency.attr ||
attr == &dev_attr_server_monitor_conn_id.attr ||
attr == &dev_attr_client_monitor_conn_id.attr)) return 0;
/* * vmbus_uevent - add uevent for our device * * This routine is invoked when a device is added or removed on the vmbus to * generate a uevent to udev in the userspace. The udev will then look at its * rule and the uevent generated here to load the appropriate driver * * The alias string will be of the form vmbus:guid where guid is the string * representation of the device guid (each byte of the guid will be * represented with two hex characters.
*/ staticint vmbus_uevent(conststruct device *device, struct kobj_uevent_env *env)
{ conststruct hv_device *dev = device_to_hv_device(device); constchar *format = "MODALIAS=vmbus:%*phN";
return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
/* * Return a matching hv_vmbus_device_id pointer. * If there is no match, return NULL.
*/ staticconststruct hv_vmbus_device_id *hv_vmbus_get_id(conststruct hv_driver *drv, struct hv_device *dev)
{ const guid_t *guid = &dev->dev_type; conststruct hv_vmbus_device_id *id;
/* When driver_override is set, only bind to the matching driver */ if (dev->driver_override && strcmp(dev->driver_override, drv->name)) return NULL;
/* Look at the dynamic ids first, before the static ones */
id = hv_vmbus_dynid_match((struct hv_driver *)drv, guid); if (!id)
id = hv_vmbus_dev_match(drv->id_table, guid);
/* driver_override will always match, send a dummy id */ if (!id && dev->driver_override)
id = &vmbus_device_null;
return id;
}
/*
 * vmbus_add_dynid - add a new device ID to this driver and re-probe devices
 *
 * This function can race with vmbus_device_register(). This function is
 * typically running on a user thread in response to writing to the "new_id"
 * sysfs entry for a driver. vmbus_device_register() is running on a
 * workqueue thread in response to the Hyper-V host offering a device to the
 * guest. This function calls driver_attach(), which looks for an existing
 * device matching the new id, and attaches the driver to which the new id
 * has been assigned. vmbus_device_register() calls device_register(), which
 * looks for a driver that matches the device being registered. If both
 * operations are running simultaneously, the device driver probe function runs
 * on whichever thread establishes the linkage between the driver and device.
 *
 * In most cases, it doesn't matter which thread runs the driver probe
 * function. But if vmbus_device_register() does not find a matching driver,
 * it proceeds to create the "channels" subdirectory and numbered per-channel
 * subdirectory in sysfs. While that multi-step creation is in progress, this
 * function could run the driver probe function. If the probe function checks
 * for, or operates on, entries in the "channels" subdirectory, including by
 * calling hv_create_ring_sysfs(), the operation may or may not succeed
 * depending on the race. The race can't create a kernel failure in VMBus
 * or device subsystem code, but probe functions in VMBus drivers doing such
 * operations must be prepared for the failure case.
 */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	/*
	 * Fix: the extraction dropped everything after the allocation; the
	 * function never recorded the guid nor returned. Restore the
	 * conventional remainder: record the new id under the dynids lock,
	 * then re-run driver matching against existing devices.
	 */
	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
/* * vmbus_match - Attempt to match the specified device to the specified driver
*/ staticint vmbus_match(struct device *device, conststruct device_driver *driver)
{ conststruct hv_driver *drv = drv_to_hv_drv(driver); struct hv_device *hv_dev = device_to_hv_device(device);
/* The hv_sock driver handles all hv_sock offers. */ if (is_hvsock_channel(hv_dev->channel)) return drv->hvsock;
if (hv_vmbus_get_id(drv, hv_dev)) return 1;
return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;
	int ret;

	dev_id = hv_vmbus_get_id(drv, dev);

	/* A driver without a probe callback cannot bind the device. */
	if (!drv->probe) {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		return -ENODEV;
	}

	ret = drv->probe(dev, dev_id);
	if (ret != 0)
		pr_err("probe failed for device %s (%d)\n",
		       dev_name(child_device), ret);

	return ret;
}
/* * vmbus_dma_configure -- Configure DMA coherence for VMbus device
*/ staticint vmbus_dma_configure(struct device *child_device)
{ /* * On ARM64, propagate the DMA coherence setting from the top level * VMbus ACPI device to the child VMbus device being added here. * On x86/x64 coherence is assumed and these calls have no effect.
*/
hv_setup_dma_ops(child_device,
device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT); return 0;
}
/* * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm. * * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there * is no way to wake up a Generation-2 VM. * * The other 4 ops are for hibernation.
*/
/* * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as * it is being used in 'struct vmbus_channel_message_header' definition * which is supposed to match hypervisor ABI.
*/
BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
/* * Since the message is in memory shared with the host, an erroneous or * malicious Hyper-V could modify the message while vmbus_on_msg_dpc() * or individual message handlers are executing; to prevent this, copy * the message into private memory.
*/
memcpy(&msg_copy, msg, sizeof(struct hv_message));
message_type = msg_copy.header.message_type; if (message_type == HVMSG_NONE) /* no msg */ return;
payload_size = msg_copy.header.payload_size; if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
WARN_ONCE(1, "payload size is too large (%d)\n", payload_size); goto msg_handled;
}
entry = &channel_message_table[msgtype];
if (!entry->message_handler) goto msg_handled;
if (payload_size < entry->min_payload_len) {
WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size); goto msg_handled;
}
if (entry->handler_type == VMHT_BLOCKING) {
ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC); if (ctx == NULL) return;
/* * The host can generate a rescind message while we * may still be handling the original offer. We deal with * this condition by relying on the synchronization provided * by offer_in_progress and by channel_mutex. See also the * inline comments in vmbus_onoffer_rescind().
*/ switch (msgtype) { case CHANNELMSG_RESCIND_CHANNELOFFER: /* * If we are handling the rescind message; * schedule the work on the global work queue. * * The OFFER message and the RESCIND message should * not be handled by the same serialized work queue, * because the OFFER handler may call vmbus_open(), * which tries to open the channel by sending an * OPEN_CHANNEL message to the host and waits for * the host's response; however, if the host has * rescinded the channel before it receives the * OPEN_CHANNEL message, the host just silently * ignores the OPEN_CHANNEL message; as a result, * the guest's OFFER handler hangs for ever, if we * handle the RESCIND message in the same serialized * work queue: the RESCIND handler can not start to * run before the OFFER handler finishes.
*/ if (vmbus_connection.ignore_any_offer_msg) break;
queue_work(vmbus_connection.rescind_work_queue, &ctx->work); break;
case CHANNELMSG_OFFERCHANNEL: /* * The host sends the offer message of a given channel * before sending the rescind message of the same * channel. These messages are sent to the guest's * connect CPU; the guest then starts processing them * in the tasklet handler on this CPU: * * VMBUS_CONNECT_CPU * * [vmbus_on_msg_dpc()] * atomic_inc() // CHANNELMSG_OFFERCHANNEL * queue_work() * ... * [vmbus_on_msg_dpc()] * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER * * We rely on the memory-ordering properties of the * queue_work() and schedule_work() primitives, which * guarantee that the atomic increment will be visible * to the CPUs which will execute the offer & rescind * works by the time these works will start execution.
*/ if (vmbus_connection.ignore_any_offer_msg) break;
atomic_inc(&vmbus_connection.offer_in_progress);
fallthrough;
#ifdef CONFIG_PM_SLEEP /* * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for * hibernation, because hv_sock connections can not persist across hibernation.
*/ staticvoid vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{ struct onmessage_work_context *ctx; struct vmbus_channel_rescind_offer *rescind;
WARN_ON(!is_hvsock_channel(channel));
/* * Allocation size is small and the allocation should really not fail, * otherwise the state of the hv_sock connections ends up in limbo.
*/
ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
GFP_KERNEL | __GFP_NOFAIL);
/* * So far, these are not really used by Linux. Just set them to the * reasonable values conforming to the definitions of the fields.
*/
ctx->msg.header.message_type = 1;
ctx->msg.header.payload_size = sizeof(*rescind);
/* These values are actually used by Linux. */
rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
rescind->child_relid = channel->offermsg.child_relid;
/*
 * NOTE(review): the tail of this function appears to be missing from this
 * extraction — presumably the work initialization/queueing (INIT_WORK() and
 * queue_work() of ctx->work) and the closing brace. As written, ctx is
 * leaked and the fake rescind is never delivered. Restore the tail from the
 * original source; do not ship this fragment as-is.
 */
/* * Schedule all channels with events pending
*/ staticvoid vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
/*
 * NOTE(review): this region is a truncated/merged extraction fragment. The
 * loop scanning the SynIC event flags (and the setup of maxbits/recv_int_page),
 * the declarations of the local `channel` and `callback_fn` variables, and the
 * sched_unlock/sched_unlock_rcu cleanup labels are all missing, so the body
 * below is not compilable as shown. The final "Check if there are actual msgs"
 * section (msg/hv_stimer0_isr handling) appears to belong to a different,
 * headerless function (presumably the VMBus ISR) — verify against the
 * original source before making any changes here.
 */
{ unsignedlong *recv_int_page;
u32 maxbits, relid;
/* * The event page can be directly checked to get the id of * the channel that has the interrupt pending.
*/ void *page_addr = hv_cpu->synic_event_page; union hv_synic_event_flags *event
= (union hv_synic_event_flags *)page_addr +
VMBUS_MESSAGE_SINT;
if (!sync_test_and_clear_bit(relid, recv_int_page)) continue;
/* Special case - vmbus channel protocol msg */ if (relid == 0) continue;
/* * Pairs with the kfree_rcu() in vmbus_chan_release(). * Guarantees that the channel data structure doesn't * get freed while the channel pointer below is being * dereferenced.
*/
rcu_read_lock();
/* Find channel based on relid */
channel = relid2channel(relid); if (channel == NULL) goto sched_unlock_rcu;
if (channel->rescind) goto sched_unlock_rcu;
/* * Make sure that the ring buffer data structure doesn't get * freed while we dereference the ring buffer pointer. Test * for the channel's onchannel_callback being NULL within a * sched_lock critical section. See also the inline comments * in vmbus_reset_channel_cb().
*/
spin_lock(&channel->sched_lock);
callback_fn = channel->onchannel_callback; if (unlikely(callback_fn == NULL)) goto sched_unlock;
trace_vmbus_chan_sched(channel);
++channel->interrupts;
switch (channel->callback_mode) { case HV_CALL_ISR:
(*callback_fn)(channel->channel_callback_context); break;
case HV_CALL_BATCHED:
hv_begin_read(&channel->inbound);
fallthrough; case HV_CALL_DIRECT:
tasklet_schedule(&channel->callback_event);
}
/* Check if there are actual msgs to be processed */ if (msg->header.message_type != HVMSG_NONE) { if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
hv_stimer0_isr();
vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
} else
tasklet_schedule(&hv_cpu->msg_dpc);
}
/*
 * Per-CPU work item: initialize the SynIC on whichever CPU this work
 * item is executing on.
 */
static void vmbus_percpu_work(struct work_struct *work)
{
	hv_synic_init(smp_processor_id());
}
/* * vmbus_bus_init -Main vmbus driver initialization routine. * * Here, we * - initialize the vmbus driver context * - invoke the vmbus hv main init routine * - retrieve the channel offers
*/ staticint vmbus_bus_init(void)
{ int ret, cpu; struct work_struct __percpu *works;
ret = hv_init(); if (ret != 0) {
pr_err("Unable to initialize the hypervisor - 0x%x\n", ret); return ret;
}
ret = bus_register(&hv_bus); if (ret) return ret;
/* * VMbus interrupts are best modeled as per-cpu interrupts. If * on an architecture with support for per-cpu IRQs (e.g. ARM64), * allocate a per-cpu IRQ using standard Linux kernel functionality. * If not on such an architecture (e.g., x86/x64), then rely on * code in the arch-specific portion of the code tree to connect * the VMbus interrupt handler.
*/
works = alloc_percpu(struct work_struct); if (!works) {
ret = -ENOMEM; goto err_alloc;
}
/* * Initialize the per-cpu interrupt state and stimer state. * Then connect to the host.
*/
cpus_read_lock();
for_each_online_cpu(cpu) { struct work_struct *work = per_cpu_ptr(works, cpu);
/* NOTE(review): the body of this per-CPU loop (presumably INIT_WORK() and
 * scheduling vmbus_percpu_work on each CPU, plus the closing brace) appears
 * to have been dropped by the extraction. */
/* Register the callbacks for possible CPU online/offline'ing */
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
cpus_read_unlock();
free_percpu(works); if (ret < 0) goto err_alloc;
hyperv_cpuhp_online = ret;
ret = vmbus_connect(); if (ret) goto err_connect;
/* * Always register the vmbus unload panic notifier because we * need to shut the VMbus channel connection on panic.
*/
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_vmbus_unload_block);
/*
 * NOTE(review): the remainder of vmbus_bus_init() — the success return and
 * the err_connect/err_alloc error-unwind labels that the gotos above jump
 * to — is missing from this extraction. The function is not compilable as
 * shown; recover the tail from the original source.
 */
/** * __vmbus_driver_register() - Register a vmbus's driver * @hv_driver: Pointer to driver structure you want to register * @owner: owner module of the drv * @mod_name: module name string * * Registers the given driver with Linux through the 'driver_register()' call * and sets up the hyper-v vmbus handling for this driver. * It will return the state of the 'driver_register()' call. *
*/ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, constchar *mod_name)
{ int ret;
/*
 * NOTE(review): only the signature and first declaration of this function
 * survived the extraction; the entire body (driver field setup and the
 * driver_register() call implied by the kernel-doc above) is missing.
 * Recover it from the original source.
 */
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previous registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	/* Guard clause: only tear down when the vmbus itself is gone. */
	if (vmbus_exists())
		return;

	driver_unregister(&hv_driver->driver);
	vmbus_free_dynids(hv_driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/* * Called when last reference to channel is gone.
*/ staticvoid vmbus_chan_release(struct kobject *kobj)
{ struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
if (vmbus_proto_version < VERSION_WIN10_V4_1) return -EIO;
/* Validate target_cpu for the cpumask_test_cpu() operation below. */ if (target_cpu >= nr_cpumask_bits) return -EINVAL;
if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ))) return -EINVAL;
if (!cpu_online(target_cpu)) return -EINVAL;
/* * Synchronizes vmbus_channel_set_cpu() and channel closure: * * { Initially: state = CHANNEL_OPENED } * * CPU1 CPU2 * * [vmbus_channel_set_cpu()] [vmbus_disconnect_ring()] * * LOCK channel_mutex LOCK channel_mutex * LOAD r1 = state LOAD r2 = state * IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED) * SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN * [...] SEND CLOSECHANNEL * UNLOCK channel_mutex UNLOCK channel_mutex * * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND * * Note. The host processes the channel messages "sequentially", in * the order in which they are received on a per-partition basis.
*/
/* * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels; * avoid sending the message and fail here for such channels.
*/ if (channel->state != CHANNEL_OPENED_STATE) {
ret = -EIO; goto end;
}
origin_cpu = channel->target_cpu; if (target_cpu == origin_cpu) goto end;
if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO; goto end;
}
/* * For version before VERSION_WIN10_V5_3, the following warning holds: * * Warning. At this point, there is *no* guarantee that the host will * have successfully processed the vmbus_send_modifychannel() request. * See the header comment of vmbus_send_modifychannel() for more info. * * Lags in the processing of the above vmbus_send_modifychannel() can * result in missed interrupts if the "old" target CPU is taken offline * before Hyper-V starts sending interrupts to the "new" target CPU. * But apart from this offlining scenario, the code tolerates such * lags. It will function correctly even if a channel interrupt comes * in on a CPU that is different from the channel target_cpu value.
*/
channel->target_cpu = target_cpu;
/* See init_vp_index(). */ if (hv_is_perf_channel(channel))
hv_update_allocated_cpus(origin_cpu, target_cpu);
/* Currently set only for storvsc channels. */ if (channel->change_target_cpu_callback) {
(*channel->change_target_cpu_callback)(channel,
origin_cpu, target_cpu);
}
/* * Channel-level attribute_group callback function. Returns the permission for * each attribute, and returns 0 if an attribute is not visible.
*/ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{ conststruct vmbus_channel *channel =
container_of(kobj, struct vmbus_channel, kobj);
/* Hide the monitor attributes if the monitor mechanism is not used. */ if (!channel->offermsg.monitor_allocated &&
(attr == &chan_attr_pending.attr ||
attr == &chan_attr_latency.attr ||
attr == &chan_attr_monitor_id.attr)) return 0;
/* Hide ring attribute if channel's ring_sysfs_visible is set to false */ if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible) return 0;
/*
 * NOTE(review): this looks internally inconsistent — the parameter is
 * declared `struct attribute *attr`, yet the function returns
 * `attr->attr.mode` and compares `attr == &chan_attr_ring_buffer` without
 * `.attr`, both of which fit a `struct bin_attribute *` parameter instead.
 * The extraction has presumably merged the plain-attribute and
 * bin-attribute is_visible callbacks into one; verify against the original
 * source before touching this function.
 */
return attr->attr.mode;
}
static size_t vmbus_chan_bin_size(struct kobject *kobj, conststruct bin_attribute *bin_attr, int a)
{ conststruct vmbus_channel *channel =
container_of(kobj, struct vmbus_channel, kobj);
/** * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel. * @channel: Pointer to vmbus_channel structure * @hv_mmap_ring_buffer: function pointer for initializing the function to be called on mmap of * channel's "ring" sysfs node, which is for the ring buffer of that channel. * Function pointer is of below type: * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, * struct vm_area_struct *vma)) * This has a pointer to the channel and a pointer to vm_area_struct, * used for mmap, as arguments. * * Sysfs node for ring buffer of a channel is created along with other fields, however its * visibility is disabled by default. Sysfs creation needs to be controlled when the use-case * is running. * For example, HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point of * time, and "ring" sysfs is needed only when uio_hv_generic is bound to that device. To avoid * exposing the ring buffer by default, this function is reponsible to enable visibility of * ring for userspace to use. * Note: Race conditions can happen with userspace and it is not encouraged to create new * use-cases for this. This was added to maintain backward compatibility, while solving * one of the race conditions in uio_hv_generic while creating sysfs. See comments with * vmbus_add_dynid() and vmbus_device_register(). * * Returns 0 on success or error code on failure.
*/ int hv_create_ring_sysfs(struct vmbus_channel *channel, int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma))
{ struct kobject *kobj = &channel->kobj;
/** * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel. * @channel: Pointer to vmbus_channel structure * * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group. * * Returns 0 on success or error code on failure.
*/ int hv_remove_ring_sysfs(struct vmbus_channel *channel)
{ struct kobject *kobj = &channel->kobj; int ret;
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	/* The per-channel kobject lives in the device's "channels" kset. */
	kobj->kset = dev->channels_kset;

	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret) {
		kobject_put(kobj);
		return ret;
	}

	ret = sysfs_create_group(kobj, &vmbus_chan_group);
	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		kobject_put(kobj);
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	/* Undo the sysfs_create_group() done in vmbus_add_channel_kobj(). */
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/* * vmbus_device_create - Creates and registers a new child device * on the vmbus.
*/ struct hv_device *vmbus_device_create(const guid_t *type, const guid_t *instance, struct vmbus_channel *channel)
{ struct hv_device *child_device_obj;
child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL); if (!child_device_obj) {
pr_err("Unable to allocate device object for child device\n"); return NULL;
}
/*
 * NOTE(review): the remainder of vmbus_device_create() is missing from this
 * extraction — presumably the assignment of the channel and the type/
 * instance guids into the new hv_device, and the return of
 * child_device_obj. As shown, the function is unterminated and the
 * allocation is leaked; recover the tail from the original source.
 */
/* * Register with the LDM. This will kick off the driver/device * binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device); if (ret) {
pr_err("Unable to register child device\n");
put_device(&child_device_obj->device); return ret;
}
/* * If device_register() found a driver to assign to the device, the * driver's probe function has already run at this point. If that * probe function accesses or operates on the "channels" subdirectory * in sysfs, those operations will have failed because the "channels" * subdirectory doesn't exist until the code below runs. Or if the * probe function creates a /dev entry, a user space program could * find and open the /dev entry, and then create a race by accessing * the "channels" subdirectory while the creation steps are in progress * here. The race can't result in a kernel failure, but the user space * program may get an error in accessing "channels" or its * subdirectories. See also comments with vmbus_add_dynid() about a * related race condition.
*/
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj); if (!child_device_obj->channels_kset) {
ret = -ENOMEM; goto err_dev_unregister;
}
ret = vmbus_add_channel_kobj(child_device_obj,
child_device_obj->channel); if (ret) {
pr_err("Unable to register primary channeln"); goto err_kset_unregister;
}
hv_debug_add_dev_dir(child_device_obj);
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/* Drop the "channels" kset created at registration time. */
	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
EXPORT_SYMBOL_GPL(vmbus_device_unregister);
#ifdef CONFIG_ACPI /* * VMBUS is an acpi enumerated device. Get the information we * need from DSDT.
*/ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
resource_size_t start = 0;
resource_size_t end = 0; struct resource *new_res; struct resource **old_res = &hyperv_mmio; struct resource **prev_res = NULL; struct resource r;
switch (res->type) {
/* * "Address" descriptors are for bus windows. Ignore * "memory" descriptors, which are for registers on * devices.
*/ case ACPI_RESOURCE_TYPE_ADDRESS32:
start = res->data.address32.address.minimum;
end = res->data.address32.address.maximum; break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
start = res->data.address64.address.minimum;
end = res->data.address64.address.maximum; break;
/* * The IRQ information is needed only on ARM64, which Hyper-V * sets up in the extended format. IRQ information is present * on x86/x64 in the non-extended format but it is not used by * Linux. So don't bother checking for the non-extended format.
*/ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: if (!acpi_dev_resource_interrupt(res, 0, &r)) {
pr_err("Unable to parse Hyper-V ACPI interrupt\n"); return AE_ERROR;
} /* ARM64 INTID for VMbus */
vmbus_interrupt = res->data.extended_irq.interrupts[0]; /* Linux IRQ number */
vmbus_irq = r.start; return AE_OK;
default: /* Unused resource type */ return AE_OK;
} /* * Ignore ranges that are below 1MB, as they're not * necessary or useful here.
*/ if (end < 0x100000) return AE_OK;
new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC); if (!new_res) return AE_NO_MEMORY;
/* If this range overlaps the virtual TPM, truncate it. */ if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
end = VTPM_BASE_ADDRESS;
if (efi_enabled(EFI_BOOT)) { /* Gen2 VM: get FB base from EFI framebuffer */ if (IS_ENABLED(CONFIG_SYSFB)) {
start = screen_info.lfb_base;
size = max_t(__u32, screen_info.lfb_size, 0x800000);
}
} else { /* Gen1 VM: get FB base from PCI */
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL); if (!pdev) return;
/* * Release the PCI device so hyperv_drm or hyperv_fb driver can * grab it later.
*/
pci_dev_put(pdev);
}
if (!start) return;
/* * Make a claim for the frame buffer in the resource tree under the * first node, which will be the one below 4GB. The length seems to * be underreported, particularly in a Generation 1 VM. So start out * reserving a larger area and make it smaller until it succeeds.
*/ for (; !fb_mmio && (size >= 0x100000); size >>= 1)
fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
/** * vmbus_allocate_mmio() - Pick a memory-mapped I/O range. * @new: If successful, supplied a pointer to the * allocated MMIO space. * @device_obj: Identifies the caller * @min: Minimum guest physical address of the * allocation * @max: Maximum guest physical address * @size: Size of the range to be allocated * @align: Alignment of the range to be allocated * @fb_overlap_ok: Whether this allocation can be allowed * to overlap the video frame buffer. * * This function walks the resources granted to VMBus by the * _CRS object in the ACPI namespace underneath the parent * "bridge" whether that's a root PCI bus in the Generation 1 * case or a Module Device in the Generation 2 case. It then * attempts to allocate from the global MMIO pool in a way that * matches the constraints supplied in these parameters and by * that _CRS. * * Return: 0 on success, -errno on failure
*/ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t min, resource_size_t max,
resource_size_t size, resource_size_t align, bool fb_overlap_ok)
{ struct resource *iter, *shadow;
resource_size_t range_min, range_max, start, end; constchar *dev_n = dev_name(&device_obj->device); int retval;
retval = -ENXIO;
mutex_lock(&hyperv_mmio_lock);
/* * If overlaps with frame buffers are allowed, then first attempt to * make the allocation from within the reserved region. Because it * is already reserved, no shadow allocation is necessary.
*/
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.26 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.