/** * hfi1_num_netdev_contexts - Count of netdev recv contexts to use. * @dd: device on which to allocate netdev contexts * @available_contexts: count of available receive contexts * @cpu_mask: mask of possible cpus to include for contexts * * Return: count of physical cores on a node or the remaining available recv * contexts for netdev recv context usage up to the maximum of * HFI1_MAX_NETDEV_CTXTS. * A value of 0 can be returned when acceleration is explicitly turned off, * a memory allocation error occurs or when there are no available contexts. *
 */
/*
 * NOTE(review): the region below is a corrupted splice of (at least) two
 * functions. hfi1_num_netdev_contexts() is cut off after the cpumask
 * allocation; it never computes or returns a context count here, and its
 * closing brace is missing. Restore from the upstream driver before building.
 */
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts, struct cpumask *cpu_mask)
{
cpumask_var_t node_cpu_mask; unsignedint available_cpus;
/* AIP capability gate: netdev contexts are used only when AIP is set. */
if (!HFI1_CAP_IS_KSET(AIP)) return 0;
/* Always give user contexts priority over netdev contexts */ if (available_contexts == 0) {
dd_dev_info(dd, "No receive contexts available for netdevs.\n"); return 0;
}
if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n"); return 0;
}
/*
 * NOTE(review): from here on the code belongs to a different function —
 * it references `rx`, `i` and `rc`, none of which are declared above.
 * It appears to be the queue-allocation body of hfi1_netdev_rxq_init():
 * allocate rx->rxq, then per queue allot a context, attach NAPI, and
 * request the MSI-X IRQ.
 */
if (!rx->rxq) {
dd_dev_err(dd, "Unable to allocate netdev queue data\n"); return (-ENOMEM);
}
for (i = 0; i < rx->num_rx_q; i++) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd); if (rc) goto bail_context_irq_failure;
hfi1_rcd_get(rxq->rcd);
rxq->rx = rx;
rxq->rcd->napi = &rxq->napi;
dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
i, rxq->rcd->ctxt); /* * Disable BUSY_POLL on this NAPI as this is not supported * right now.
*/
set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi);
rc = msix_netdev_request_rcd_irq(rxq->rcd); if (rc) goto bail_context_irq_failure;
}
return 0;
bail_context_irq_failure:
/*
 * NOTE(review): the error-unwind loop below is truncated — its body and
 * the function's closing brace are missing from this chunk.
 */
dd_dev_err(dd, "Unable to allot receive context\n"); for (; i >= 0; i--) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
/*
 * enable_queues - enable NAPI and the receive context for every rx queue.
 * @rx: netdev rx state whose queues are brought up
 *
 * For each queue, napi_enable() runs before the receive context is enabled
 * (HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB).
 */
/* Fixed: fused "staticvoid" token and merged statement lines. */
static void enable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);
		napi_enable(&rxq->napi);
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
			     rxq->rcd);
	}
}
/*
 * disable_queues - disable the receive context and NAPI for every rx queue.
 * @rx: netdev rx state whose queues are taken down
 *
 * IRQs are synchronized first; then each context is disabled before its
 * NAPI instance is synchronized and disabled.
 */
/* Fixed: fused "staticvoid" token and merged statement lines. */
static void disable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	msix_netdev_synchronize_irq(rx->dd);

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);

		/* wait for napi if it was scheduled */
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
			     rxq->rcd);
		napi_synchronize(&rxq->napi);
		napi_disable(&rxq->napi);
	}
}
/**
 * hfi1_netdev_rx_init - Increments netdevs counter. When called the first
 * time, it allocates receive queue data and calls netif_napi_add
 * for each queue.
 *
 * @dd: hfi1 dev data
 *
 * Return: 0 when the queues were already initialized (counter was nonzero),
 * otherwise the result of hfi1_netdev_rxq_init().
 */
/* Fixed: merged statement lines; typo "Incrememnts" in the kernel-doc. */
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	int res;

	/* Only the first caller performs queue initialization. */
	if (atomic_fetch_inc(&rx->netdevs))
		return 0;

	mutex_lock(&hfi1_mutex);
	res = hfi1_netdev_rxq_init(rx);
	mutex_unlock(&hfi1_mutex);
	return res;
}
/**
 * hfi1_netdev_rx_destroy - Decrements netdevs counter; when it reaches 0,
 * napi is deleted and receive queues memory is freed.
 *
 * @dd: hfi1 dev data
 *
 * Return: always 0.
 */
/* Fixed: merged statement lines; typo "queses" in the kernel-doc. */
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	/* destroy the RX queues only if it is the last netdev going away */
	if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
		mutex_lock(&hfi1_mutex);
		hfi1_netdev_rxq_deinit(rx);
		mutex_unlock(&hfi1_mutex);
	}

	return 0;
}
/** * hfi1_alloc_rx - Allocates the rx support structure * @dd: hfi1 dev data * * Allocate the rx structure to support gathering the receive * resources and the dummy netdev. * * Updates dd struct pointer upon success. * * Return: 0 (success) -error on failure *
 */ int hfi1_alloc_rx(struct hfi1_devdata *dd)
{ struct hfi1_netdev_rx *rx;
/*
 * NOTE(review): function body is truncated here — the allocation logic
 * and the closing brace are missing from this chunk. Restore from the
 * upstream driver before building.
 */
/** * hfi1_netdev_enable_queues - This is napi enable function. * It enables napi objects associated with queues. * When at least one device has called it it increments atomic counter. * Disable function decrements counter and when it is 0, * calls napi_disable for every queue. * * @dd: hfi1 dev data
 */ void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{ struct hfi1_netdev_rx *rx;
if (!dd->netdev_rx) return;
rx = dd->netdev_rx; if (atomic_fetch_inc(&rx->enabled)) return;
/*
 * NOTE(review): function body is truncated here — presumably the call
 * that actually enables the queues (and the closing brace) is missing
 * from this chunk. Restore from the upstream driver before building.
 */
/** * hfi1_netdev_add_data - Registers data with unique identifier * to be requested later this is needed for VNIC and IPoIB VLANs * implementations. * This call is protected by mutex idr_lock. * * @dd: hfi1 dev data * @id: requested integer id up to INT_MAX * @data: data to be associated with index
 */ int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{ struct hfi1_netdev_rx *rx = dd->netdev_rx;
/*
 * NOTE(review): function body is truncated here — the table-insert call
 * and the closing brace are missing from this chunk. Restore from the
 * upstream driver before building.
 */
/**
 * hfi1_netdev_remove_data - Removes data with previously given id.
 * Returns the reference to removed entry.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 *
 * Return: the entry previously stored in dev_tbl under @id, or NULL if
 * none was present.
 */
/* Fixed: merged statement lines; no logic change. */
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_erase(&rx->dev_tbl, id);
}
/**
 * hfi1_netdev_get_data - Gets data with given id
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 *
 * Return: the entry stored in dev_tbl under @id, or NULL if none is
 * present; the entry is not removed.
 */
/* Fixed: merged statement lines; no logic change. */
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_load(&rx->dev_tbl, id);
}
/** * hfi1_netdev_get_first_data - Gets first entry with greater or equal id. * * @dd: hfi1 dev data * @start_id: requested integer id up to INT_MAX
 */ void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{ struct hfi1_netdev_rx *rx = dd->netdev_rx; unsignedlong index = *start_id; void *ret;
/*
 * NOTE(review): function body is truncated here — the lookup (presumably
 * an xa_find on rx->dev_tbl), the *start_id write-back, the return and
 * the closing brace are missing from this chunk. Also note the fused
 * "unsignedlong" token on the line above; restore from upstream.
 */
/*
 * NOTE(review): the text below is website boilerplate (originally German)
 * that leaked into this source file during extraction; it is not part of
 * the driver and should be removed. Kept here, translated to English:
 *
 * The information on this website has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */