/* NOTE(review): this region is garbled by extraction. The allocation loop of
 * octep_vf_alloc_ioq_vectors() has been collapsed together with the body of
 * what appears to be a separate free routine (the vfree loop and the
 * "Freed IOQ Vectors" message below); the alloc path's success return,
 * per-vector field initialization, and the free_ioq_vector error label are
 * missing. Restore both functions from the original source before building.
 */
/** * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. * * @oct: Octeon device private data structure. * * Allocate resources to hold per Tx/Rx queue interrupt info. * This is the information passed to interrupt handler, from which napi poll * is scheduled and includes quick access to private data of Tx/Rx queue * corresponding to the interrupt being handled. * * Return: 0, on successful allocation of resources for all queue interrupts. * -1, if failed to allocate any resource.
*/ staticint octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
{ struct octep_vf_ioq_vector *ioq_vector; int i;
for (i = 0; i < oct->num_oqs; i++) {
oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); if (!oct->ioq_vector[i]) goto free_ioq_vector;
/* NOTE(review): from here down this looks like the free routine's body,
 * not part of the allocation loop above.
 */
for (i = 0; i < oct->num_oqs; i++) { if (oct->ioq_vector[i]) {
vfree(oct->ioq_vector[i]);
oct->ioq_vector[i] = NULL;
}
}
netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}
/* NOTE(review): function body truncated by extraction — only the local
 * declarations survive; the MSI-x allocation logic is missing here.
 */
/** * octep_vf_enable_msix_range() - enable MSI-x interrupts. * * @oct: Octeon device private data structure. * * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) * for the Octeon device. * * Return: 0, on successfully enabling all MSI-x interrupts. * -1, if failed to enable any MSI-x interrupt.
*/ staticint octep_vf_enable_msix_range(struct octep_vf_device *oct)
{ int num_msix, msix_allocated; int i;
/* NOTE(review): function body truncated by extraction — only the local
 * declarations survive; the NAPI-scheduling logic is missing here.
 */
/** * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. * * @irq: Interrupt number. * @data: interrupt data contains pointers to Tx/Rx queue private data * and corresponding NAPI context. * * This is the common handler for all Tx/Rx queue interrupts.
*/ static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
{ struct octep_vf_ioq_vector *ioq_vector = data; struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
/**
 * octep_vf_free_irqs() - free all registered interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Free all queue and non-queue interrupts of the Octeon device.
 */
static void octep_vf_free_irqs(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_irqs; i++) {
		/* Drop the affinity hint before releasing the vector */
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
	}
	netdev_info(oct->netdev, "IRQs freed\n");
}
/* NOTE(review): this region is garbled by extraction. The top of
 * octep_vf_setup_irqs() survives (its goto labels ioq_vector_err /
 * enable_msix_err / request_irq_err are referenced but not defined in this
 * view), and the smp_wmb()/writeq lines below use 'oq' and 'iq' variables
 * that are not in scope here — they belong to a different (interrupt
 * re-enable) function whose header was lost.
 */
/** * octep_vf_setup_irqs() - setup interrupts for the Octeon device. * * @oct: Octeon device private data structure. * * Allocate data structures to hold per interrupt information, allocate/enable * MSI-x interrupt and register interrupt handlers. * * Return: 0, on successful allocation and registration of all interrupts. * -1, on any error.
*/ staticint octep_vf_setup_irqs(struct octep_vf_device *oct)
{ if (octep_vf_alloc_ioq_vectors(oct)) goto ioq_vector_err;
if (octep_vf_enable_msix_range(oct)) goto enable_msix_err;
if (octep_vf_request_irqs(oct)) goto request_irq_err;
/* Flush the previous writes before writing to RESEND bit */
smp_wmb();
writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}
/* NOTE(review): body truncated by extraction — tx_pending and rx_done are
 * read below without ever being assigned; the Tx-completion and Rx-processing
 * calls that set them are missing from this view. Restore from the original
 * source before building.
 */
/** * octep_vf_napi_poll() - NAPI poll function for Tx/Rx. * * @napi: pointer to napi context. * @budget: max number of packets to be processed in single invocation.
*/ staticint octep_vf_napi_poll(struct napi_struct *napi, int budget)
{ struct octep_vf_ioq_vector *ioq_vector =
container_of(napi, struct octep_vf_ioq_vector, napi);
u32 tx_pending, rx_done;
/* need more polling if tx completion processing is still pending or * processed at least 'budget' number of rx packets.
*/ if (tx_pending || rx_done >= budget) return budget;
if (likely(napi_complete_done(napi, rx_done)))
octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
return rx_done;
}
/**
 * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_add(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
		/* let the Rx queue find its NAPI context for scheduling */
		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
	}
}
/**
 * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_delete(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
		netif_napi_del(&oct->ioq_vector[i]->napi);
		oct->oq[i]->napi = NULL;
	}
}
/**
 * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_enable(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
		napi_enable(&oct->ioq_vector[i]->napi);
	}
}
/**
 * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_vf_napi_disable(struct octep_vf_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
		napi_disable(&oct->ioq_vector[i]->napi);
	}
}
/**
 * octep_vf_set_rx_state() - request the PF to set VF Rx state via mailbox.
 *
 * @oct: Octeon device private data structure.
 * @up: true to enable Rx, false to disable.
 *
 * Failure is only logged; no error is propagated to the caller.
 */
static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
{
	int err;

	err = octep_vf_mbox_set_rx_state(oct, up);
	if (err)
		netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
}
/**
 * octep_vf_get_link_status() - query link status from the PF via mailbox.
 *
 * @oct: Octeon device private data structure.
 *
 * Updates oct->link_info.oper_up from the mailbox response; on mailbox
 * failure the error is only logged and the cached value is returned.
 *
 * Return: current (possibly cached) operational link state.
 */
static int octep_vf_get_link_status(struct octep_vf_device *oct)
{
	int err;

	err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
	if (err)
		netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
	return oct->link_info.oper_up;
}
/**
 * octep_vf_set_link_status() - request the PF to set link state via mailbox.
 *
 * @oct: Octeon device private data structure.
 * @up: true to bring the link up, false to bring it down.
 *
 * The cached oct->link_info.oper_up is updated only when the mailbox
 * request succeeds; on failure the error is logged and the cache is
 * left unchanged.
 */
static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
{
	int err;

	err = octep_vf_mbox_set_link_status(oct, up);
	if (err) {
		netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
		return;
	}
	oct->link_info.oper_up = up;
}
/* NOTE(review): function body truncated by extraction — only the local
 * declarations survive; the queue/interrupt bring-up logic is missing here.
 */
/** * octep_vf_open() - start the octeon network device. * * @netdev: pointer to kernel network device. * * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues * and interrupts. * * Return: 0, on successfully setting up device and bring it up. * -1, on any error.
*/ staticint octep_vf_open(struct net_device *netdev)
{ struct octep_vf_device *oct = netdev_priv(netdev); int err, ret;
/* NOTE(review): function body truncated by extraction — only the initial
 * carrier/Tx shutdown survives; the queue/interrupt teardown and the return
 * statement are missing here.
 */
/** * octep_vf_stop() - stop the octeon network device. * * @netdev: pointer to kernel network device. * * stop the device Tx/Rx operations, bring down the link and * free up all resources allocated for Tx/Rx queues and interrupts.
*/ staticint octep_vf_stop(struct net_device *netdev)
{ struct octep_vf_device *oct = netdev_priv(netdev);
netdev_info(netdev, "Stopping the device ...\n");
/* Stop Tx from stack */
netif_carrier_off(netdev);
netif_tx_disable(netdev);
/* NOTE(review): this region is garbled by extraction. octep_vf_iq_full_check()
 * is mostly intact, but the trailing frag++/si++ lines below its closing brace
 * belong to the scatter-gather loop of the transmit path, whose surrounding
 * function was lost. Restore from the original source before building.
 */
/** * octep_vf_iq_full_check() - check if a Tx queue is full. * * @iq: Octeon Tx queue data structure. * * Return: 0, if the Tx queue is not full. * 1, if the Tx queue is full.
*/ staticint octep_vf_iq_full_check(struct octep_vf_iq *iq)
{ int ret;
ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
OCTEP_VF_WAKE_QUEUE_THRESHOLD,
OCTEP_VF_WAKE_QUEUE_THRESHOLD); switch (ret) { case 0: /* Stopped the queue, since IQ is full */ return 1; case -1: /* * Pending updates in write index from * iq_process_completion in other cpus * caused queues to get re-enabled after * being stopped
*/
iq->stats->restart_cnt++;
fallthrough; case 1: /* Queue left enabled, since IQ is not yet full*/ return 0;
}
/* NOTE(review): lines below are a stray fragment of the xmit SG loop */
frag++;
si++;
}
/* NOTE(review): interior fragment of the transmit (start_xmit) path — the
 * enclosing function's header and tail are outside this view, and the fused
 * token "elseif" below is an extraction artifact that will not compile.
 * Restore from the original source before building.
 */
hw_desc->dptr = tx_buffer->sglist_dma;
} if (oct->fw_info.tx_ol_flags) { if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
} elseif (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
} /* due to ESR txm will be swapped by hw */
hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
}
xmit_more = netdev_xmit_more();
netdev_tx_sent_queue(iq->netdev_q, skb->len);
skb_tx_timestamp(skb);
iq->fill_cnt++;
wi++;
iq->host_write_index = wi & iq->ring_size_mask;
/* octep_iq_full_check stops the queue and returns * true if so, in case the queue has become full * by inserting current packet. If so, we can * go ahead and ring doorbell.
*/ if (!octep_vf_iq_full_check(iq) && xmit_more &&
iq->fill_cnt < iq->fill_threshold) return NETDEV_TX_OK;
/**
 * octep_vf_get_link_info() - fetch VF link info from the PF over mailbox.
 *
 * @oct: Octeon device private data structure.
 *
 * Bulk-reads the link info structure into oct->link_info.
 *
 * Return: 0 on success, negative/non-zero mailbox error code on failure.
 */
int octep_vf_get_link_info(struct octep_vf_device *oct)
{
	int size;
	int ret;

	ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
				      (u8 *)&oct->link_info, &size);
	if (!ret)
		return 0;

	dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
	return ret;
}
/* NOTE(review): function body truncated by extraction — only the local
 * declarations survive; the per-queue stats aggregation is missing here.
 */
/** * octep_vf_get_stats64() - Get Octeon network device statistics. * * @netdev: kernel network device. * @stats: pointer to stats structure to be filled in.
*/ staticvoid octep_vf_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{ struct octep_vf_device *oct = netdev_priv(netdev);
u64 tx_packets, tx_bytes, rx_packets, rx_bytes; int q;
/* NOTE(review): function body truncated by extraction — only the local
 * declarations survive; the stop/start recovery sequence is missing here.
 */
/** * octep_vf_tx_timeout_task - work queue task to Handle Tx queue timeout. * * @work: pointer to Tx queue timeout work_struct * * Stop and start the device so that it frees up all queue resources * and restarts the queues, that potentially clears a Tx queue timeout * condition.
**/ staticvoid octep_vf_tx_timeout_task(struct work_struct *work)
{ struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
tx_timeout_task); struct net_device *netdev = oct->netdev;
/**
 * octep_vf_devid_to_str() - map the VF's PCI device ID to a chip name.
 *
 * @oct: Octeon device private data structure.
 *
 * Return: printable chip name string; "Unsupported" for unknown IDs.
 */
static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
{
	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN93_VF:
		return "CN93XX";
	case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
		return "CNF95N";
	case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
		return "CN10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
		return "CNF10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
		return "CNF10KB";
	case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
		return "CN10KB";
	default:
		return "Unsupported";
	}
}
/* NOTE(review): function body truncated by extraction — only the local
 * declaration survives; the PCI driver registration and the rest of the
 * file are missing from this view.
 */
/** * octep_vf_init_module() - Module initialization. * * create common resource for the driver and register PCI driver.
*/ staticint __init octep_vf_init_module(void)
{ int ret;
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
experimentell.