/** * msix_initialize() - Calculate, request and configure MSIx IRQs * @dd: valid hfi1 devdata *
*/ int msix_initialize(struct hfi1_devdata *dd)
{
u32 total; int ret; struct hfi1_msix_entry *entries;
/* * MSIx interrupt count: * one for the general, "slow path" interrupt * one per used SDMA engine * one per kernel receive context * one for each VNIC context * ...any new IRQs should be added here.
*/
total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
if (total >= CCE_NUM_MSIX_VECTORS) return -EINVAL;
ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX); if (ret < 0) {
dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret); return ret;
}
/* * assign arg after pci_request_irq call, so it will be * cleaned up
*/
me = &dd->msix_info.msix_entries[nr];
me->irq = irq;
me->arg = arg;
me->type = type;
/* This is a request, so a failure is not fatal */
ret = hfi1_get_irq_affinity(dd, me); if (ret)
dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
return nr;
}
/**
 * msix_request_rcd_irq_common() - Request an IRQ for a receive context
 * @rcd: valid receive context
 * @handler: primary interrupt handler
 * @thread: threaded interrupt handler (may be NULL)
 * @name: name for the IRQ
 *
 * Requests the MSIx vector (netdev vs. kernel receive type is chosen from
 * rcd->is_vnic) and records the context's interrupt register/mask so the
 * interrupt source can be remapped to the allocated vector.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
				       irq_handler_t handler,
				       irq_handler_t thread, const char *name)
{
	int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
				  rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
				  name);
	if (nr < 0)
		return nr;

	/*
	 * Set the interrupt register and mask for this
	 * context's interrupt.
	 */
	rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
	rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
	rcd->msix_intr = nr;
	remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);

	return 0;
}
/** * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs * @rcd: valid rcd context *
*/ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{ char name[MAX_NAME_SIZE];
/** * msix_request_general_irq - Helper for getting general IRQ * resources * @dd: valid device data
*/ int msix_request_general_irq(struct hfi1_devdata *dd)
{ int nr; char name[MAX_NAME_SIZE];
snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
name); if (nr < 0) return nr;
/* general interrupt must be MSIx vector 0 */ if (nr) {
msix_free_irq(dd, (u8)nr);
dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr); return -EINVAL;
}
return 0;
}
/**
 * enable_sdma_srcs() - Helper to enable SDMA IRQ srcs
 * @dd: valid devdata structure
 * @i: index of SDMA engine
 *
 * Enables all four interrupt sources (main, progress, idle, engine error)
 * for the given SDMA engine.
 */
static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
{
	set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
	set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
		      IS_SDMA_PROGRESS_START + i, true);
	set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
	set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
		      true);
}
/**
 * msix_request_irqs() - Allocate all MSIx IRQs
 * @dd: valid devdata structure
 *
 * Helper function to request the used MSIx IRQs.
 *
 * Return: 0 on success, negative errno on failure.
 */
int msix_request_irqs(struct hfi1_devdata *dd)
{
	int i;
	int ret = msix_request_general_irq(dd);

	if (ret)
		return ret;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		ret = msix_request_sdma_irq(sde);
		if (ret)
			return ret;
		enable_sdma_srcs(sde->dd, i);
	}

	for (i = 0; i < dd->n_krcv_queues; i++) {
		struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);

		if (rcd)
			ret = msix_request_rcd_irq(rcd);
		hfi1_rcd_put(rcd);
		if (ret)
			return ret;
	}

	return 0;
}
/** * msix_clean_up_interrupts - Free all MSIx IRQ resources * @dd: valid device data data structure * * Free the MSIx and associated PCI resources, if they have been allocated.
*/ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
{ int i; struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
/* remove irqs - must happen before disabling/turning off */ for (i = 0; i < dd->msix_info.max_requested; i++, me++)
msix_free_irq(dd, i);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.