/*
 * NOTE(review): fragment — the enclosing function (a PCI MSI prepare
 * callback; pdev, alias_dev, alias_count, minnvec, nvec and info are
 * declared outside this chunk) starts and ends beyond this view.
 */
	pdev = to_pci_dev(dev);
	/*
	 * If pdev is downstream of any aliasing bridges, take an upper
	 * bound of how many other vectors could map to the same DevID.
	 * Also tell the ITS that the signalling will come from a proxy
	 * device, and that special allocation rules apply.
	 */
	pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
	if (alias_dev != pdev) {
		/* Count vectors of everything below the aliasing bridge */
		if (alias_dev->subordinate)
			pci_walk_bus(alias_dev->subordinate,
				     its_pci_msi_vec_count, &alias_count);
		info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
	}

	/* ITS specific DeviceID, as the core ITS ignores dev. */
	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);

	/*
	 * Always allocate a power of 2, and special case device 0 for
	 * broken systems where the DevID is not wired (and all devices
	 * appear as DevID 0). For that reason, we generously allocate a
	 * minimum of 32 MSIs for DevID 0. If you want more because all
	 * your devices are aliasing to DevID 0, consider fixing your HW.
	 */
	nvec = max(nvec, alias_count);
	if (!info->scratchpad[0].ul)
		minnvec = 32;
	nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
/*
 * of_pmsi_get_dev_id - retrieve the ITS DeviceID for a platform device
 * @domain:	the MSI irq domain whose OF node identifies the msi-parent
 * @dev:	device whose DT node carries the MSI properties
 * @dev_id:	output; the DeviceID on success
 *
 * Walk the "msi-parent" phandle list; when an entry references @domain's
 * OF node, its single argument cell is the DeviceID. If no entry matches,
 * fall back to the "msi-map"/"msi-map-mask" translation via of_map_id().
 *
 * Returns 0 on success with *dev_id set, a negative errno otherwise.
 */
static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
			      u32 *dev_id)
{
	int ret, index = 0;

	/* Suck the DeviceID out of the msi-parent property */
	do {
		struct of_phandle_args args;

		ret = of_parse_phandle_with_args(dev->of_node, "msi-parent",
						 "#msi-cells", index, &args);
		/*
		 * NOTE(review): args.np is tested even when ret != 0;
		 * presumably of_parse_phandle_with_args() leaves args.np
		 * NULL on failure — confirm against its kernel-doc.
		 */
		if (args.np == irq_domain_get_of_node(domain)) {
			/* Exactly one cell (the DeviceID) is expected */
			if (WARN_ON(args.args_count != 1))
				return -EINVAL;
			*dev_id = args.args[0];
			break;
		}
		index++;
	} while (!ret);

	if (ret) {
		struct device_node *np = NULL;

		ret = of_map_id(dev->of_node, dev->id, "msi-map",
				"msi-map-mask", &np, dev_id);
		if (np)
			of_node_put(np);
	}

	return ret;
}
/*
 * of_v5_pmsi_get_msi_info - retrieve DeviceID and ITS translate frame address
 * @domain:	MSI irq domain (its OF node is the msi controller parent)
 * @dev:	device whose DT node carries the MSI properties
 * @dev_id:	output; the DeviceID on success
 * @pa:		output; physical address of the ITS translate frame
 *
 * Retrieve the DeviceID and the ITS translate frame node pointer
 * out of the msi-parent property, falling back to "msi-map" if the
 * direct lookup fails.
 *
 * Returns 0 on success with *dev_id and *pa set, a negative errno otherwise.
 */
static int of_v5_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev,
				   u32 *dev_id, phys_addr_t *pa)
{
	int ret, index = 0;

	do {
		struct of_phandle_args args;

		ret = of_parse_phandle_with_args(dev->of_node, "msi-parent",
						 "#msi-cells", index, &args);
		if (ret)
			break;

		/*
		 * The IRQ domain fwnode is the msi controller parent
		 * in GICv5 (where the msi controller nodes are the
		 * ITS translate frames).
		 */
		if (args.np->parent == irq_domain_get_of_node(domain)) {
			/* Exactly one cell (the DeviceID) is expected */
			if (WARN_ON(args.args_count != 1))
				return -EINVAL;
			*dev_id = args.args[0];
			ret = its_translate_frame_address(args.np, pa);
			if (ret)
				return -ENODEV;
			break;
		}
		index++;
	} while (!ret);

	if (ret) {
		struct device_node *np = NULL;

		ret = of_map_id(dev->of_node, dev->id, "msi-map",
				"msi-map-mask", &np, dev_id);
		if (np) {
			ret = its_translate_frame_address(np, pa);
			of_node_put(np);
		}
	}

	/*
	 * NOTE(review): the trailing "return ret;" and closing brace were
	 * missing from the mangled source; reconstructed here — without
	 * them control flow falls off a non-void function (UB). Mirrors
	 * the sibling of_pmsi_get_dev_id() epilogue.
	 */
	return ret;
}
/*
 * NOTE(review): fragment — the enclosing function (an MSI-parent init
 * callback returning bool, with a struct msi_domain_info *info in scope)
 * starts outside this chunk. Mangled tokens "switch(" and "returnfalse;"
 * fixed in place; logic unchanged.
 */
	switch (info->bus_token) {
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
		/*
		 * FIXME: This probably should be done after a (not yet
		 * existing) post domain creation callback once to make
		 * support for dynamic post-enable MSI-X allocations
		 * work without having to reevaluate the domain size
		 * over and over. It is known already at allocation
		 * time via info->hwsize.
		 *
		 * That should work perfectly fine for MSI/MSI-X but needs
		 * some thoughts for purely software managed MSI domains
		 * where the index space is only limited artificially via
		 * %MSI_MAX_INDEX.
		 */
		info->ops->msi_prepare = its_pci_msi_prepare;
		info->ops->msi_teardown = its_msi_teardown;
		break;
	case DOMAIN_BUS_DEVICE_MSI:
	case DOMAIN_BUS_WIRED_TO_MSI:
		/*
		 * FIXME: See the above PCI prepare comment. The domain
		 * size is also known at domain creation time.
		 */
		info->ops->msi_prepare = its_pmsi_prepare;
		info->ops->msi_teardown = its_msi_teardown;
		break;
	default:
		/* Confused. How did the lib return true? */
		WARN_ON_ONCE(1);
		return false;
	}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.