// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
 * initial domain support. We also handle the DSDT _PRT callbacks for GSI's
 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
 * concept of GSIs). Under PV we hook under the pnbbios API for IRQs and
 * 0xcf8 PCI configuration read/write.
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 *         Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *         Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>
/*
 * xen_pcifront_enable_irq - enable the legacy INTx interrupt of a device
 * in a PV domU, where the Xen PCI backend communicates the PIRQ via the
 * PCI_INTERRUPT_LINE config register.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
	int rc;
	int share = 1;
	int pirq;
	u8 gsi;

	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (rc) {
		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
			 rc);
		return pcibios_err_to_errno(rc);
	}

	/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
	pirq = gsi;

	/* Legacy (ISA-range) interrupts are exclusive, never shared. */
	if (gsi < nr_legacy_irqs())
		share = 0;

	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
			 gsi, pirq, rc);
		return rc;
	}

	/*
	 * NOTE(review): the original extract was truncated after the bind
	 * failure branch; tail restored per the upstream kernel source.
	 */
	dev->irq = rc;
	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
	return 0;
}
/*
 * xen_setup_msi_irqs - allocate MSI/MSI-X vectors through the Xen PCI
 * frontend (PV domU) and bind each resulting pirq to a Linux irq.
 *
 * Returns 0 on success, 1 to ask the PCI core to retry with a single MSI
 * vector, or a negative errno on failure.
 */
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, i;
	struct msi_desc *msidesc;
	int *v;

	/* Multi-vector MSI is unsupported here; ret > 0 => retry with one. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	if (type == PCI_CAP_ID_MSIX)
		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
	else
		ret = xen_pci_frontend_enable_msi(dev, v);
	if (ret)
		goto error;

	/* Bind every not-yet-associated descriptor to its frontend pirq. */
	i = 0;
	msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "pcifront-msi-x" : "pcifront-msi",
					       DOMID_SELF);
		if (irq < 0) {
			ret = irq;
			goto free;
		}
		i++;
	}
	kfree(v);
	return msi_device_populate_sysfs(&dev->dev);

error:
	if (ret == -ENOSYS)
		dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
	else if (ret)
		dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
	kfree(v);
	return ret;
}
staticvoid xen_msi_compose_msg(struct pci_dev *pdev, unsignedint pirq, struct msi_msg *msg)
{ /* * We set vector == 0 to tell the hypervisor we don't care about * it, but we want a pirq setup instead. We use the dest_id fields * to pass the pirq that we want.
*/
memset(msg, 0, sizeof(*msg));
msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
msg->arch_addr_hi.destid_8_31 = pirq >> 8;
msg->arch_addr_lo.destid_0_7 = pirq & 0xFF;
msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
}
/*
 * NOTE(review): garbled extraction.  The signature below belongs to
 * xen_hvm_setup_msi_irqs(), but the body (xen_find_device_domain_owner(),
 * map_irq / pci_seg_supported handling) appears to come from the
 * initial-domain MSI setup path of the upstream file.  The declarations of
 * 'ret', 'domid', 'map_irq' and the enclosing per-descriptor loop are
 * missing, so this span does not compile as-is.  Left byte-identical;
 * recover the missing lines from the original file before changing logic.
 */
staticint xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{ int irq, pirq; struct msi_desc *msidesc; struct msi_msg msg;
/* Ask Xen which domain owns this device; a negative value means error. */
domid = ret = xen_find_device_domain_owner(dev); /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
* hence check ret value for < 0. */ if (ret < 0)
domid = DOMID_SELF;
ret = -EINVAL; if (pci_seg_supported)
ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
&map_irq); if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) { /* * If MAP_PIRQ_TYPE_MULTI_MSI is not available * there's nothing else we can do in this case. * Just set ret > 0 so driver can retry with * single MSI.
*/
ret = 1; goto out;
} if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
/* Hypervisor lacks segment-aware map_pirq: retry with the legacy type. */
map_irq.type = MAP_PIRQ_TYPE_MSI;
map_irq.index = -1;
map_irq.pirq = -1;
map_irq.bus = dev->bus->number;
ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
&map_irq); if (ret != -EINVAL)
pci_seg_supported = false;
} if (ret) {
dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
ret, domid); goto out;
}
/* * This irq domain is a blatant violation of the irq domain design, but * distangling XEN into real irq domains is not a job for mere mortals with * limited XENology. But it's the least dangerous way for a mere mortal to * get rid of the arch_*_msi_irqs() hackery in order to store the irq * domain pointer in struct device. This irq domain wrappery allows to do * that without breaking XEN terminally.
*/ static __init struct irq_domain *xen_create_pci_msi_domain(void)
{ struct irq_domain *d = NULL; struct fwnode_handle *fn;
/* Allocate a software fwnode so the new MSI irq domain has a name. */
fn = irq_domain_alloc_named_fwnode("XEN-MSI"); if (fn)
d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);
/* FIXME: No idea how to survive if this fails */
BUG_ON(!d);
/*
 * NOTE(review): garbled extraction below.  xen_create_pci_msi_domain()
 * should end with "return d;" here; the x86_init.irqs assignment and
 * the acpi_noirq_set()/xen_setup_pci_msi() tail appear to belong to
 * other functions in the upstream file whose surrounding lines are
 * missing from this view.  Left byte-identical pending recovery.
 */
/* * Override the PCI/MSI irq domain init function. No point * in allocating the native domain and never use it.
*/
x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
}
/* Keep ACPI out of the picture */
acpi_noirq_set();
xen_setup_pci_msi(); return 0;
}
/*
 * NOTE(review): truncated extraction.  xen_hvm_msi_init() is cut off right
 * after the cpuid_eax() read; the rest of its body and the #endif matching
 * the CONFIG_PCI_MSI guard (fused onto the first line) are missing from
 * this view.  Left byte-identical pending recovery of the missing lines.
 */
#ifdef CONFIG_PCI_MSI staticvoid __init xen_hvm_msi_init(void)
{ if (!apic_is_disabled) { /* * If hardware supports (x2)APIC virtualization (as indicated * by hypervisor's leaf 4) then we don't need to use pirqs/ * event channels for MSI handling and instead use regular * APIC processing
*/
uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
/*
 * pci_xen_hvm_init - wire up Xen-specific GSI registration and MSI setup
 * for HVM guests that have a vector callback and HVM pirq support.
 *
 * Returns 0 (also when Xen pirq support is absent and nothing is done).
 */
int __init pci_xen_hvm_init(void)
{
	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
	__acpi_unregister_gsi = NULL;
#endif

#ifdef CONFIG_PCI_MSI
	/*
	 * We need to wait until after x2apic is initialized
	 * before we can set MSI IRQ ops.
	 */
	x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
	return 0;
}
/*
 * NOTE(review): truncated extraction.  pci_xen_initial_domain() is cut off
 * inside the legacy-IRQ pre-allocation loop; the remainder of the loop body
 * and the function's closing lines are missing from this view.  The #ifdef
 * is also fused onto the function's first line.  Left byte-identical.
 */
#ifdef CONFIG_XEN_PV_DOM0 int __init pci_xen_initial_domain(void)
{ int irq;
/* Route MSI allocation through Xen for the initial (hardware) domain. */
xen_setup_pci_msi();
__acpi_register_gsi = acpi_register_gsi_xen;
__acpi_unregister_gsi = NULL; /* * Pre-allocate the legacy IRQs. Use NR_LEGACY_IRQS here * because we don't have a PIC and thus nr_legacy_irqs() is zero.
*/ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { int trigger, polarity;
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) continue;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.