// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */
/* space for 32bit serial number as string */ #define SLOT_NAME_SIZE 11
/* * Size of requestor for VMbus; the value is based on the observation * that having more than one request outstanding is 'rare', and so 64 * should be generous in ensuring that we don't ever run out.
*/ #define HV_PCI_RQSTOR_SIZE 64
/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;		/* PCI device (slot) number */
		u32	func:3;		/* PCI function number */
		u32	reserved:24;
	} bits;
	u32 slot;			/* the same 32 bits, as one opaque value */
} __packed;
/*
 * Pretty much as defined in the PCI Specifications.
 * Describes one PCI function exposed by the host (protocol version 1.x).
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;		/* revision ID */
	u8	prog_intf;	/* programming interface */
	u8	subclass;	/* device sub-class code */
	u8	base_class;	/* device base class code */
	u32	subsystem_id;	/* subsystem vendor + device ID */
	union win_slot_encoding win_slot;	/* Windows-format slot */
	u32	ser;	/* serial number */
} __packed;
/*
 * Extended variant of struct pci_function_description used by newer
 * protocol versions; adds flags and the virtual NUMA node of the device.
 */
struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;		/* revision ID */
	u8	prog_intf;	/* programming interface */
	u8	subclass;	/* device sub-class code */
	u8	base_class;	/* device base class code */
	u32	subsystem_id;	/* subsystem vendor + device ID */
	union win_slot_encoding win_slot;	/* Windows-format slot */
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;	/* guest NUMA node for the device */
	u16	reserved;
} __packed;
/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;
/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;
/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 * Everything is the same as in 'hv_msi_desc2' except that the size of the
 * 'vector' field is larger to support bigger vector values. For ex: LPI
 * vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;		/* widened from u8 to hold e.g. ARM LPIs */
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;	/* number of entries used in the array */
	u16	processor_array[32];	/* target virtual processors */
} __packed;
/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;
/* * A generic message format for virtual PCI. * Specific message formats are defined later in the file.
*/
/* * Specific message types supporting the PCI protocol.
*/
/* * Version negotiation message. Sent from the guest to the host. * The guest is free to try different versions until the host * accepts the version. * * pci_version: The protocol version requested. * is_last_attempt: If TRUE, this is the last version guest will request. * reservedz: Reserved field, set to zero.
*/
/*
 * Request to read a config block from the back-end driver.
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;	/* message header */
	u32 block_id;				/* which config block to read */
	union win_slot_encoding wslot;		/* target function, Windows format */
	u32 bytes_requested;			/* how many bytes the VM wants back */
} __packed;
/* Highest slot of child device with resources allocated */ int wslot_res_allocated; bool use_calls; /* Use hypercalls to access mmio cfg space */
};
/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;		/* deferred-processing work item */
	struct hv_pcibus_device *bus;	/* bus the relations message is for */
};
/*
 * Guest-internal copy of a function description received from the host.
 * Mirrors the wire-format pci_function_description{,2} fields, but is not
 * __packed since it is never sent over VMbus.
 */
struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;		/* revision ID */
	u8	prog_intf;	/* programming interface */
	u8	subclass;	/* device sub-class code */
	u8	base_class;	/* device base class code */
	u32	subsystem_id;	/* subsystem vendor + device ID */
	union win_slot_encoding win_slot;	/* Windows-format slot */
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;	/* guest NUMA node for the device */
};
/* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space.
*/
u32 probed_bar[PCI_STD_NUM_BARS];
};
/** * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current * affinity. * @data: Describes the IRQ * * Build new a destination for the MSI and make a hypercall to * update the Interrupt Redirection Table. "Device Logical ID" * is built out of this PCI bus's instance GUID and the function * number of the device.
*/ staticvoid hv_irq_retarget_interrupt(struct irq_data *data)
{ struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct hv_retarget_device_interrupt *params; struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; conststruct cpumask *dest;
cpumask_var_t tmp; struct pci_bus *pbus; struct pci_dev *pdev; unsignedlong flags;
u32 var_size = 0; int cpu, nr_bank;
u64 res;
dest = irq_data_get_effective_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
int_desc = data->chip_data; if (!int_desc) {
dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
__func__, data->irq); return;
}
if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { /* * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides * with >64 VP support. * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED * is not sufficient for this hypercall.
*/
params->int_target.flags |=
HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
res = 1; goto out;
}
/* * var-sized hypercall, var-size starts after vp_mask (thus * vp_set.format does not count, but vp_set.valid_bank_mask * does).
*/
var_size = 1 + nr_bank;
} else {
for_each_cpu_and(cpu, dest, cpu_online_mask) {
params->int_target.vp_mask |=
(1ULL << hv_cpu_number_to_vp_number(cpu));
}
}
res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
params, NULL);
out:
local_irq_restore(flags);
/* * During hibernation, when a CPU is offlined, the kernel tries * to move the interrupt to the remaining CPUs that haven't * been offlined yet. In this case, the below hv_do_hypercall() * always fails since the vmbus channel has been closed: * refer to cpu_disable_common() -> fixup_irqs() -> * irq_migrate_all_off_this_cpu() -> migrate_one_irq(). * * Suppress the error message for hibernation because the failure * during hibernation does not matter (at this time all the devices * have been frozen). Note: the correct affinity info is still updated * into the irqdata data structure in migrate_one_irq() -> * irq_do_set_affinity(), so later when the VM resumes, * hv_pci_restore_msi_state() is able to correctly restore the * interrupt with the correct affinity.
*/ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
dev_err(&hbus->hdev->device, "%s() failed: %#llx", __func__, res);
}
/*
 * Arch-specific (x86) unmask: point the interrupt at its current effective
 * affinity by telling the hypervisor about the mapping.
 */
static void hv_arch_irq_unmask(struct irq_data *data)
{
	if (hv_root_partition())
		/*
		 * In case of the nested root partition, the nested hypervisor
		 * is taking care of interrupt remapping and thus the
		 * MAP_DEVICE_INTERRUPT hypercall is required instead of
		 * RETARGET_INTERRUPT.
		 */
		(void)hv_map_msi_interrupt(data, NULL);
	else
		hv_irq_retarget_interrupt(data);
}

#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
 * of room at the start to allow for SPIs to be specified through ACPI and
 * starting with a power of two to satisfy power of 2 multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE		0
#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_EOI
#define hv_msi_prepare		NULL
/* * @nr_bm_irqs: Indicates the number of IRQs that were allocated from * the bitmap. * @nr_dom_irqs: Indicates the number of IRQs that were allocated from * the parent domain.
*/ staticvoid hv_pci_vec_irq_free(struct irq_domain *domain, unsignedint virq, unsignedint nr_bm_irqs, unsignedint nr_dom_irqs)
{ struct hv_pci_chip_data *chip_data = domain->host_data; struct irq_data *d = irq_domain_get_irq_data(domain, virq); int first = d->hwirq - HV_PCI_MSI_SPI_START; int i;
mutex_lock(&chip_data->map_lock);
bitmap_release_region(chip_data->spi_map,
first,
get_count_order(nr_bm_irqs));
mutex_unlock(&chip_data->map_lock); for (i = 0; i < nr_dom_irqs; i++) { if (i)
d = irq_domain_get_irq_data(domain, virq + i);
irq_domain_reset_irq_data(d);
}
/* Find and allocate region from the SPI bitmap */
mutex_lock(&chip_data->map_lock);
index = bitmap_find_free_region(chip_data->spi_map,
HV_PCI_MSI_SPI_NR,
get_count_order(nr_irqs));
mutex_unlock(&chip_data->map_lock); if (index < 0) return -ENOSPC;
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); if (ret) return ret;
/* * Since the interrupt specifier is not coming from ACPI or DT, the * trigger type will need to be set explicitly. Otherwise, it will be * set to whatever is in the GIC configuration.
*/
d = irq_domain_get_irq_data(domain->parent, virq);
ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq); if (ret) return ret;
for (i = 0; i < nr_irqs; i++) {
ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
hwirq + i); if (ret) {
hv_pci_vec_irq_free(domain, virq, nr_irqs, i); return ret;
}
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq + i,
&hv_arm64_msi_irq_chip,
domain->host_data);
pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
}
return 0;
}
/* * Pick the first cpu as the irq affinity that can be temporarily used for * composing MSI from the hypervisor. GIC will eventually set the right * affinity for the irq and the 'unmask' will retarget the interrupt to that * cpu.
*/ staticint hv_pci_vec_irq_domain_activate(struct irq_domain *domain, struct irq_data *irqd, bool reserve)
{ int cpu = cpumask_first(cpu_present_mask);
staticint hv_pci_irqchip_init(void)
{ staticstruct hv_pci_chip_data *chip_data; struct fwnode_handle *fn = NULL; struct irq_domain *irq_domain_parent = NULL; int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); if (!chip_data) return ret;
mutex_init(&chip_data->map_lock);
fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64"); if (!fn) goto free_chip;
/* * IRQ domain once enabled, should not be removed since there is no * way to ensure that all the corresponding devices are also gone and * no interrupts will be generated.
*/ #ifdef CONFIG_ACPI if (!acpi_disabled)
irq_domain_parent = hv_pci_acpi_irq_domain_parent(); #endif #ifdef CONFIG_OF if (!irq_domain_parent)
irq_domain_parent = hv_pci_of_irq_domain_parent(); #endif if (!irq_domain_parent) {
WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
ret = -EINVAL; goto free_chip;
}
/*
 * SPIs are used for interrupts of PCI devices and SPIs are managed via GICD
 * registers which Hyper-V already supports, so no hypercall needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */
/** * hv_pci_generic_compl() - Invoked for a completion packet * @context: Set up by the sender of the packet. * @resp: The response packet * @resp_packet_size: Size in bytes of the packet * * This function is used to trigger an event and report status * for any message for which the completion packet contains a * status and nothing else.
*/ staticvoid hv_pci_generic_compl(void *context, struct pci_response *resp, int resp_packet_size)
{ struct hv_pci_compl *comp_pkt = context;
/*
 * Drop one reference on @hpdev; the device is freed when the final
 * reference goes away.
 */
static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (!refcount_dec_and_test(&hpdev->refs))
		return;

	kfree(hpdev);
}
/*
 * There is no good way to get notified from vmbus_onoffer_rescind(), so
 * poll for the completion instead, checking on every iteration whether the
 * channel has been rescinded. This is not a hot path, so polling is fine.
 *
 * Returns 0 once @comp completes, or -ENODEV if the device is gone.
 */
static int wait_for_response(struct hv_device *hdev, struct completion *comp)
{
	for (;;) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		/* Re-check the rescind flag every 100ms until completion. */
		if (wait_for_completion_timeout(comp, HZ / 10))
			return 0;
	}
}
/** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot * * Windows uses a slightly different representation of PCI slot. * * Return: The Windows representation
*/ static u32 devfn_to_wslot(int devfn)
{ union win_slot_encoding wslot;
/** * wslot_to_devfn() - Convert from Windows PCI slot to Linux * @wslot: The Windows representation of PCI slot * * Windows uses a slightly different representation of PCI slot. * * Return: The Linux representation
*/ staticint wslot_to_devfn(u32 wslot)
{ union win_slot_encoding slot_no;
/* * Must be called with interrupts disabled so it is safe * to use the per-cpu input argument page. Use it for * both input and output.
*/
in = *this_cpu_ptr(hyperv_pcpu_input_arg);
out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
in->gpa = gpa;
in->size = size;
ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out); if (hv_result_success(ret)) { switch (size) { case 1:
*val = *(u8 *)(out->data); break; case 2:
*val = *(u16 *)(out->data); break; default:
*val = *(u32 *)(out->data); break;
}
} else
dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
ret, gpa, size);
}
/* * Must be called with interrupts disabled so it is safe * to use the per-cpu input argument memory.
*/
in = *this_cpu_ptr(hyperv_pcpu_input_arg);
in->gpa = gpa;
in->size = size; switch (size) { case 1:
*(u8 *)(in->data) = val; break; case 2:
*(u16 *)(in->data) = val; break; default:
*(u32 *)(in->data) = val; break;
}
ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL); if (!hv_result_success(ret))
dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
ret, gpa, size);
}
/* * PCI Configuration Space for these root PCI buses is implemented as a pair * of pages in memory-mapped I/O space. Writing to the first page chooses * the PCI function being written or read. Once the first page has been * written to, the following page maps in the entire configuration space of * the function.
*/
/** * _hv_pcifront_read_config() - Internal PCI config read * @hpdev: The PCI driver's representation of the device * @where: Offset within config space * @size: Size of the transfer * @val: Pointer to the buffer receiving the data
*/ staticvoid _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size, u32 *val)
{ struct hv_pcibus_device *hbus = hpdev->hbus; struct device *dev = &hbus->hdev->device; int offset = where + CFG_PAGE_OFFSET; unsignedlong flags;
/* * If the attempt is to read the IDs or the ROM BAR, simulate that.
*/ if (where + size <= PCI_COMMAND) {
memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
} elseif (where >= PCI_CLASS_REVISION && where + size <=
PCI_CACHE_LINE_SIZE) {
memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
PCI_CLASS_REVISION, size);
} elseif (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
PCI_ROM_ADDRESS) {
memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
PCI_SUBSYSTEM_VENDOR_ID, size);
} elseif (where >= PCI_ROM_ADDRESS && where + size <=
PCI_CAPABILITY_LIST) { /* ROM BARs are unimplemented */
*val = 0;
} elseif ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
(where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled * interrupts.
*/
*val = 0;
} elseif (where + size <= CFG_PAGE_SIZE) {
/* Choose the function to be read. (See comment above) */
writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); /* Make sure the function was chosen before reading. */
mb(); /* Read from that function's config space. */ switch (size) { case 1:
*val = readb(addr); break; case 2:
*val = readw(addr); break; default:
*val = readl(addr); break;
} /* * Make sure the read was done before we release the * spinlock allowing consecutive reads/writes.
*/
mb();
}
spin_unlock_irqrestore(&hbus->config_lock, flags);
} else {
dev_err(dev, "Attempt to read beyond a function's config space.\n");
}
}
hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
hpdev->desc.win_slot.slot);
hv_pci_read_mmio(dev, addr, 2, &val);
ret = val; /* Truncates to 16 bits */
} else { void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
PCI_VENDOR_ID; /* Choose the function to be read. (See comment above) */
writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); /* Make sure the function was chosen before we start reading. */
mb(); /* Read from that function's config space. */
ret = readw(addr); /* * mb() is not required here, because the * spin_unlock_irqrestore() is a barrier.
*/
}
/** * _hv_pcifront_write_config() - Internal PCI config write * @hpdev: The PCI driver's representation of the device * @where: Offset within config space * @size: Size of the transfer * @val: The data being transferred
*/ staticvoid _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size, u32 val)
{ struct hv_pcibus_device *hbus = hpdev->hbus; struct device *dev = &hbus->hdev->device; int offset = where + CFG_PAGE_OFFSET; unsignedlong flags;
if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
where + size <= PCI_CAPABILITY_LIST) { /* SSIDs and ROM BARs are read-only */
} elseif (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
spin_lock_irqsave(&hbus->config_lock, flags);
if (hbus->use_calls) {
phys_addr_t addr = hbus->mem_config->start + offset;
/* Choose the function to write. (See comment above) */
writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); /* Make sure the function was chosen before writing. */
wmb(); /* Write to that function's config space. */ switch (size) { case 1:
writeb(val, addr); break; case 2:
writew(val, addr); break; default:
writel(val, addr); break;
} /* * Make sure the write was done before we release the * spinlock allowing consecutive reads/writes.
*/
mb();
}
spin_unlock_irqrestore(&hbus->config_lock, flags);
} else {
dev_err(dev, "Attempt to write beyond a function's config space.\n");
}
}
/** * hv_pcifront_read_config() - Read configuration space * @bus: PCI Bus structure * @devfn: Device/function * @where: Offset from base * @size: Byte/word/dword * @val: Value to be read * * Return: PCIBIOS_SUCCESSFUL on success * PCIBIOS_DEVICE_NOT_FOUND on failure
*/ staticint hv_pcifront_read_config(struct pci_bus *bus, unsignedint devfn, int where, int size, u32 *val)
{ struct hv_pcibus_device *hbus =
container_of(bus->sysdata, struct hv_pcibus_device, sysdata); struct hv_pci_dev *hpdev;
hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); if (!hpdev) return PCIBIOS_DEVICE_NOT_FOUND;
/* * Paravirtual backchannel * * Hyper-V SR-IOV provides a backchannel mechanism in software for * communication between a VF driver and a PF driver. These * "configuration blocks" are similar in concept to PCI configuration space, * but instead of doing reads and writes in 32-bit chunks through a very slow * path, packets of up to 128 bytes can be sent or received asynchronously. * * Nearly every SR-IOV device contains just such a communications channel in * hardware, so using this one in software is usually optional. Using the * software channel, however, allows driver implementers to leverage software * tools that fuzz the communications channel looking for vulnerabilities. * * The usage model for these packets puts the responsibility for reading or * writing on the VF driver. The VF driver sends a read or a write packet, * indicating which "block" is being referred to by number. * * If the PF driver wishes to initiate communication, it can "invalidate" one or * more of the first 64 blocks. This invalidation is delivered via a callback * supplied to the VF driver by this driver. * * No protocol is implied, except that supplied by the PF and VF drivers.
*/
/** * hv_read_config_block() - Sends a read config block request to * the back-end driver running in the Hyper-V parent partition. * @pdev: The PCI driver's representation for this device. * @buf: Buffer into which the config block will be copied. * @len: Size in bytes of buf. * @block_id: Identifies the config block which has been requested. * @bytes_returned: Size which came back from the back-end driver. * * Return: 0 on success, -errno on failure
*/ staticint hv_read_config_block(struct pci_dev *pdev, void *buf, unsignedint len, unsignedint block_id, unsignedint *bytes_returned)
{ struct hv_pcibus_device *hbus =
container_of(pdev->bus->sysdata, struct hv_pcibus_device,
sysdata); struct { struct pci_packet pkt; char buf[sizeof(struct pci_read_block)];
} pkt; struct hv_read_config_compl comp_pkt; struct pci_read_block *read_blk; int ret;
if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) return -EINVAL;
/** * hv_write_config_block() - Sends a write config block request to the * back-end driver running in the Hyper-V parent partition. * @pdev: The PCI driver's representation for this device. * @buf: Buffer from which the config block will be copied. * @len: Size in bytes of buf. * @block_id: Identifies the config block which is being written. * * Return: 0 on success, -errno on failure
*/ staticint hv_write_config_block(struct pci_dev *pdev, void *buf, unsignedint len, unsignedint block_id)
{ struct hv_pcibus_device *hbus =
container_of(pdev->bus->sysdata, struct hv_pcibus_device,
sysdata); struct { struct pci_packet pkt; char buf[sizeof(struct pci_write_block)];
u32 reserved;
} pkt; struct hv_pci_compl comp_pkt; struct pci_write_block *write_blk;
u32 pkt_size; int ret;
if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) return -EINVAL;
init_completion(&comp_pkt.host_event);
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
write_blk->byte_count = len;
memcpy(write_blk->bytes, buf, len);
pkt_size = offsetof(struct pci_write_block, bytes) + len; /* * This quirk is required on some hosts shipped around 2018, because * these hosts don't check the pkt_size correctly (new hosts have been * fixed since early 2019). The quirk is also safe on very old hosts * and new hosts, because, on them, what really matters is the length * specified in write_blk->byte_count.
*/
pkt_size += sizeof(pkt.reserved);
ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
(unsignedlong)&pkt.pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) return ret;
ret = wait_for_response(hbus->hdev, &comp_pkt.host_event); if (ret) return ret;
/** * hv_msi_free() - Free the MSI. * @domain: The interrupt domain pointer * @info: Extra MSI-related context * @irq: Identifies the IRQ. * * The Hyper-V parent partition and hypervisor are tracking the * messages that are in use, keeping the interrupt redirection * table up to date. This callback sends a message that frees * the IRT entry and related tracking nonsense.
*/ staticvoid hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsignedint irq)
{ struct hv_pcibus_device *hbus; struct hv_pci_dev *hpdev; struct pci_dev *pdev; struct tran_int_desc *int_desc; struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq); struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
/* * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in * hv_irq_unmask().
*/
int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
returnsizeof(*int_pkt);
}
/* * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is * not irrelevant because Hyper-V chooses the physical CPU to handle the * interrupts based on the vCPU specified in message sent to the vPCI VSP in * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest, * but assigning too many vPCI device interrupts to the same pCPU can cause a * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V * to spread out the pCPUs that it selects. * * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu() * to always return the same dummy vCPU, because a second call to * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a * new pCPU for the interrupt. But for the multi-MSI case, the second call to * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that * the pCPUs are spread out. All interrupts for a multi-MSI device end up using * the same pCPU, even though the vCPUs will be spread out by later calls * to hv_irq_unmask(), but that is the best we can do now. * * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not* * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an * enhancement is planned for a future version. With that enhancement, the * dummy vCPU selection won't matter, and interrupts for the same multi-MSI * device will be spread across multiple pCPUs.
*/
/*
 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
 * by subsequent retarget in hv_irq_unmask().
 */
static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
{
	/* First online CPU in the affinity mask; only a placeholder target. */
	return cpumask_first_and(affinity, cpu_online_mask);
}
/* * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
*/ staticint hv_compose_multi_msi_req_get_cpu(void)
{ static DEFINE_SPINLOCK(multi_msi_cpu_lock);
/* -1 means starting with CPU 0 */ staticint cpu_next = -1;
unsignedlong flags; int cpu;
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
/** * hv_compose_msi_msg() - Supplies a valid MSI address/data * @data: Everything about this MSI * @msg: Buffer that is filled in by this function * * This function unpacks the IRQ looking for target CPU set, IDT * vector and mode and sends a message to the parent partition * asking for a mapping for that tuple in this partition. The * response supplies a data value and address to which that data * should be written to trigger that interrupt.
*/ staticvoid hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{ struct hv_pcibus_device *hbus; struct vmbus_channel *channel; struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; conststruct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct msi_desc *msi_desc; /* * vector_count should be u16: see hv_msi_desc, hv_msi_desc2 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
*/
u16 vector_count;
u32 vector; struct { struct pci_packet pci_pkt; union { struct pci_create_interrupt v1; struct pci_create_interrupt2 v2; struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt; bool multi_msi;
u64 trans_id;
u32 size; int ret; int cpu;
/* Free any previous message that might have already been composed. */ if (data->chip_data && !multi_msi) {
int_desc = data->chip_data;
data->chip_data = NULL;
hv_int_desc_free(hpdev, int_desc);
}
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference;
if (multi_msi) { /* * If this is not the first MSI of Multi MSI, we already have * a mapping. Can exit early.
*/ if (msi_desc->irq != data->irq) {
data->chip_data = int_desc;
int_desc->address = msi_desc->msg.address_lo |
(u64)msi_desc->msg.address_hi << 32;
int_desc->data = msi_desc->msg.data +
(data->irq - msi_desc->irq);
msg->address_hi = msi_desc->msg.address_hi;
msg->address_lo = msi_desc->msg.address_lo;
msg->data = int_desc->data;
put_pcichild(hpdev); return;
} /* * The vector we select here is a dummy value. The correct * value gets sent to the hypervisor in unmask(). This needs * to be aligned with the count, and also not zero. Multi-msi * is powers of 2 up to 32, so 32 will always work here.
*/
vector = 32;
vector_count = msi_desc->nvec_used;
cpu = hv_compose_multi_msi_req_get_cpu();
} else {
vector = hv_msi_get_int_vector(data);
vector_count = 1;
cpu = hv_compose_msi_req_get_cpu(dest);
}
/* * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector' * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly * for better readability.
*/
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
ctxt.pci_pkt.compl_ctxt = ∁
case PCI_PROTOCOL_VERSION_1_2: case PCI_PROTOCOL_VERSION_1_3:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
cpu,
hpdev->desc.win_slot.slot,
(u8)vector,
vector_count); break;
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
cpu,
hpdev->desc.win_slot.slot,
vector,
vector_count); break;
default: /* As we only negotiate protocol versions known to this driver, * this path should never hit. However, this is it not a hot * path so we print a message to aid future updates.
*/
dev_err(&hbus->hdev->device, "Unexpected vPCI protocol, update driver."); goto free_int_desc;
}
ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
size, (unsignedlong)&ctxt.pci_pkt,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) {
dev_err(&hbus->hdev->device, "Sending request for interrupt failed: 0x%x",
comp.comp_pkt.completion_status); goto free_int_desc;
}
/* * Prevents hv_pci_onchannelcallback() from running concurrently * in the tasklet.
*/
tasklet_disable_in_atomic(&channel->callback_event);
/* * Since this function is called with IRQ locks held, can't * do normal wait for completion; instead poll.
*/ while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { unsignedlong flags;
/* 0xFFFF means an invalid PCI VENDOR ID. */ if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
dev_err_once(&hbus->hdev->device, "the device has gone\n"); goto enable_tasklet;
}
/* * Make sure that the ring buffer data structure doesn't get * freed while we dereference the ring buffer pointer. Test * for the channel's onchannel_callback being NULL within a * sched_lock critical section. See also the inline comments * in vmbus_reset_channel_cb().
*/
spin_lock_irqsave(&channel->sched_lock, flags); if (unlikely(channel->onchannel_callback == NULL)) {
spin_unlock_irqrestore(&channel->sched_lock, flags); goto enable_tasklet;
}
hv_pci_onchannelcallback(hbus);
spin_unlock_irqrestore(&channel->sched_lock, flags);
udelay(100);
}
tasklet_enable(&channel->callback_event);
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device, "Request for interrupt failed: 0x%x",
comp.comp_pkt.completion_status); goto free_int_desc;
}
/* * Record the assignment so that this can be unwound later. Using * irq_set_chip_data() here would be appropriate, but the lock it takes * is already held.
*/
*int_desc = comp.int_desc;
data->chip_data = int_desc;
/* Pass up the result. */
msg->address_hi = comp.int_desc.address >> 32;
msg->address_lo = comp.int_desc.address & 0xffffffff;
msg->data = comp.int_desc.data;
put_pcichild(hpdev); return;
enable_tasklet:
tasklet_enable(&channel->callback_event); /* * The completion packet on the stack becomes invalid after 'return'; * remove the ID from the VMbus requestor if the identifier is still * mapped to/associated with the packet. (The identifier could have * been 're-used', i.e., already removed and (re-)mapped.) * * Cf. hv_pci_onchannelcallback().
*/
vmbus_request_addr_match(channel, trans_id, (unsignedlong)&ctxt.pci_pkt);
free_int_desc:
kfree(int_desc);
drop_reference:
put_pcichild(hpdev);
return_null_message:
msg->address_hi = 0;
msg->address_lo = 0;
msg->data = 0;
}
staticint hv_pcie_domain_alloc(struct irq_domain *d, unsignedint virq, unsignedint nr_irqs, void *arg)
{ /* * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg() * should be moved here.
*/ int ret;
ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg); if (ret < 0) return ret;
for (int i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL); if (IS_ENABLED(CONFIG_X86))
__irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
}
/**
 * hv_pcie_init_irq_domain() - Initialize IRQ domain
 * @hbus:	The root PCI bus
 *
 * This function creates an IRQ domain which will be used for
 * interrupts from devices that have been passed through. These
 * devices only support MSI and MSI-X, not line-based interrupts
 * or simulations of line-based interrupts through PCIe's
 * fabric-layer messages. Because interrupts are remapped, we
 * can support multi-message MSI here.
 *
 * Return: '0' on success and error value on failure
 */
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
	struct irq_domain_info info = {
		.fwnode		= hbus->fwnode,
		.ops		= &hv_pcie_domain_ops,
		.host_data	= hbus,
		.parent		= hv_pci_get_root_domain(),
	};

	hbus->irq_domain = msi_create_parent_irq_domain(&info,
							&hv_pcie_msi_parent_ops);
	if (!hbus->irq_domain) {
		dev_err(&hbus->hdev->device,
			"Failed to build an MSI IRQ domain\n");
		return -ENODEV;
	}

	/*
	 * Missing in the captured source: the kerneldoc promises '0' on
	 * success, and a non-void function must not fall off the end.
	 */
	return 0;
}
/**
 * get_bar_size() - Get the address space consumed by a BAR
 * @bar_val:	Value that a BAR returned after -1 was written
 *		to it.
 *
 * This function returns the size of the BAR, rounded up to 1
 * page. It has to be rounded up because the hypervisor's page
 * table entry that maps the BAR into the VM can't specify an
 * offset within a page. The invariant is that the hypervisor
 * must place any BARs of smaller than page length at the
 * beginning of a page.
 *
 * Return: Size in bytes of the consumed MMIO space.
 */
static u64 get_bar_size(u64 bar_val)
{
	u64 base = bar_val & PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * After writing all-ones, the writable address bits read back
	 * set; the two's complement of the masked value (~base + 1)
	 * is the BAR's decoded span.
	 */
	return round_up(~base + 1, PAGE_SIZE);
}
/** * survey_child_resources() - Total all MMIO requirements * @hbus: Root PCI bus, as understood by this driver
*/ staticvoid survey_child_resources(struct hv_pcibus_device *hbus)
{ struct hv_pci_dev *hpdev;
resource_size_t bar_size = 0; unsignedlong flags; struct completion *event;
u64 bar_val; int i;
/* If nobody is waiting on the answer, don't compute it. */
event = xchg(&hbus->survey_event, NULL); if (!event) return;
/* If the answer has already been computed, go with it. */ if (hbus->low_mmio_space || hbus->high_mmio_space) {
complete(event); return;
}
/* * Due to an interesting quirk of the PCI spec, all memory regions * for a child device are a power of 2 in size and aligned in memory, * so it's sufficient to just add them up without tracking alignment.
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) { for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device, "There's an I/O BAR in this list!\n");
if (hpdev->probed_bar[i] != 0) { /* * A probed BAR has all the upper bits set that * can be changed.
*/
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.