/** * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. * * @oct: Octeon device private data structure. * * Allocate resources to hold per Tx/Rx queue interrupt info. * This is the information passed to interrupt handler, from which napi poll * is scheduled and includes quick access to private data of Tx/Rx queue * corresponding to the interrupt being handled. * * Return: 0, on successful allocation of resources for all queue interrupts. * -1, if failed to allocate any resource.
 */ staticint octep_alloc_ioq_vectors(struct octep_device *oct)
{ int i; struct octep_ioq_vector *ioq_vector;
/* NOTE(review): this block appears garbled by extraction: `staticint` is
 * missing a space, and the allocation loop below is interleaved with what
 * looks like the body of a separate free/cleanup routine (the vfree loop and
 * "Freed IOQ Vectors" message) -- restore from the upstream driver source.
 */
for (i = 0; i < oct->num_oqs; i++) {
/* Allocate one per-queue interrupt-vector struct; on allocation failure jump
 * to the free_ioq_vector unwind label (label not visible in this view).
 */
oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); if (!oct->ioq_vector[i]) goto free_ioq_vector;
/* NOTE(review): the loop below frees every allocated vector and NULLs the
 * pointers -- it reads like cleanup code from another function that was
 * merged into this one during extraction; verify against upstream.
 */
for (i = 0; i < oct->num_oqs; i++) { if (oct->ioq_vector[i]) {
vfree(oct->ioq_vector[i]);
oct->ioq_vector[i] = NULL;
}
}
netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}
/** * octep_enable_msix_range() - enable MSI-x interrupts. * * @oct: Octeon device private data structure. * * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) * for the Octeon device. * * Return: 0, on successfully enabling all MSI-x interrupts. * -1, if failed to enable any MSI-x interrupt.
 */ staticint octep_enable_msix_range(struct octep_device *oct)
{ int num_msix, msix_allocated; int i;
/* NOTE(review): function body is truncated here -- only the local variable
 * declarations survived extraction, and `staticint` is missing a space.
 * Restore the full body from the upstream driver source.
 */
/**
 * octep_mbox_intr_handler() - common handler for pfvf mbox interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate the PF/VF mailbox interrupt to the chip-specific handler.
 */
static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.mbox_intr_handler(octep_dev);
}
/**
 * octep_oei_intr_handler() - common handler for output endpoint interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate the output endpoint interrupt to the chip-specific handler.
 */
static irqreturn_t octep_oei_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.oei_intr_handler(octep_dev);
}
/**
 * octep_ire_intr_handler() - common handler for input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate the input ring error interrupt to the chip-specific handler.
 */
static irqreturn_t octep_ire_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.ire_intr_handler(octep_dev);
}
/**
 * octep_ore_intr_handler() - common handler for output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate the output ring error interrupt to the chip-specific handler.
 */
static irqreturn_t octep_ore_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.ore_intr_handler(octep_dev);
}
/**
 * octep_vfire_intr_handler() - common handler for vf input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate the VF input ring error interrupt to the chip-specific handler.
 */
static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.vfire_intr_handler(octep_dev);
}
/**
 * octep_vfore_intr_handler() - common handler for vf output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate the VF output ring error interrupt to the chip-specific handler.
 */
static irqreturn_t octep_vfore_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.vfore_intr_handler(octep_dev);
}
/**
 * octep_dma_intr_handler() - common handler for dpi dma related interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate DPI DMA interrupts to the chip-specific handler.
 */
static irqreturn_t octep_dma_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.dma_intr_handler(octep_dev);
}
/**
 * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error
 *                               interrupts for VFs.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate VF DPI DMA transaction error interrupts to the chip-specific
 * handler.
 */
static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.dma_vf_intr_handler(octep_dev);
}
/**
 * octep_pp_vf_intr_handler() - common handler for pp transaction error
 *                              interrupts for VFs.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate VF PP transaction error interrupts to the chip-specific handler.
 */
static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.pp_vf_intr_handler(octep_dev);
}
/**
 * octep_misc_intr_handler() - common handler for mac related interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate MAC related interrupts to the chip-specific handler.
 */
static irqreturn_t octep_misc_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.misc_intr_handler(octep_dev);
}
/**
 * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use).
 *
 * @irq: Interrupt number.
 * @data: interrupt data (Octeon device private data structure).
 *
 * Delegate reserved interrupts to the chip-specific handler.
 */
static irqreturn_t octep_rsvd_intr_handler(int irq, void *data)
{
	struct octep_device *octep_dev = data;

	return octep_dev->hw_ops.rsvd_intr_handler(octep_dev);
}
/** * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. * * @irq: Interrupt number. * @data: interrupt data contains pointers to Tx/Rx queue private data * and corresponding NAPI context. * * this is common handler for all non-queue (generic) interrupts.
 */ static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
{ struct octep_ioq_vector *ioq_vector = data; struct octep_device *oct = ioq_vector->octep_dev;
/* NOTE(review): function body is truncated here -- only the local variable
 * declarations survived extraction (the napi scheduling logic is missing).
 * The kernel-doc text above also contradicts the function name ("non-queue
 * (generic) interrupts" vs. a queue interrupt handler) -- verify against the
 * upstream driver source.
 */
/**
 * octep_free_irqs() - free all registered interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Free all queue and non-queue interrupts of the Octeon device.
 */
static void octep_free_irqs(struct octep_device *oct)
{
	int i;

	/* First few MSI-X interrupts are non queue interrupts; free them */
	for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
		free_irq(oct->msix_entries[i].vector, oct);
	kfree(oct->non_ioq_irq_names);

	/* Free IRQs for Input/Output (Tx/Rx) queues */
	for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
		/* Clear the affinity hint before releasing the vector */
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector,
			 oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
	}
	netdev_info(oct->netdev, "IRQs freed\n");
}
/** * octep_setup_irqs() - setup interrupts for the Octeon device. * * @oct: Octeon device private data structure. * * Allocate data structures to hold per interrupt information, allocate/enable * MSI-x interrupt and register interrupt handlers. * * Return: 0, on successful allocation and registration of all interrupts. * -1, on any error.
 */ staticint octep_setup_irqs(struct octep_device *oct)
{ if (octep_alloc_ioq_vectors(oct)) goto ioq_vector_err;
if (octep_enable_msix_range(oct)) goto enable_msix_err;
if (octep_request_irqs(oct)) goto request_irq_err;
/* NOTE(review): this block is garbled -- the goto targets (ioq_vector_err,
 * enable_msix_err, request_irq_err) are not defined in this view, `staticint`
 * is missing a space, and the writeq() lines below reference `oq`/`iq`
 * locals that are never declared here (they look merged in from a different
 * function). Restore from the upstream driver source.
 */
/* Flush the previous writes before writing to RESEND bit */
wmb();
writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}
/** * octep_napi_poll() - NAPI poll function for Tx/Rx. * * @napi: pointer to napi context. * @budget: max number of packets to be processed in single invocation.
 */ staticint octep_napi_poll(struct napi_struct *napi, int budget)
{ struct octep_ioq_vector *ioq_vector =
container_of(napi, struct octep_ioq_vector, napi);
u32 tx_pending, rx_done;
/* NOTE(review): the middle of this function is missing -- `tx_pending` and
 * `rx_done` are read below without ever being assigned (the Tx-completion
 * and Rx-processing calls were lost in extraction), and `staticint` is
 * missing a space. Restore from the upstream driver source.
 */
/* need more polling if tx completion processing is still pending or * processed at least 'budget' number of rx packets.
 */ if (tx_pending || rx_done >= budget) return budget;
/**
 * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_add(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
			       octep_napi_poll);
		/* Let the Rx queue find its NAPI context quickly */
		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
	}
}
/**
 * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_delete(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
		netif_napi_del(&oct->ioq_vector[i]->napi);
		/* Clear the Rx queue's back-reference set in octep_napi_add() */
		oct->oq[i]->napi = NULL;
	}
}
/**
 * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_enable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
		napi_enable(&oct->ioq_vector[i]->napi);
	}
}
/**
 * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_disable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
		napi_disable(&oct->ioq_vector[i]->napi);
	}
}
/** * octep_stop() - stop the octeon network device. * * @netdev: pointer to kernel network device. * * stop the device Tx/Rx operations, bring down the link and * free up all resources allocated for Tx/Rx queues and interrupts.
 */ staticint octep_stop(struct net_device *netdev)
{ struct octep_device *oct = netdev_priv(netdev);
/* NOTE(review): function body is truncated here -- only the private-data
 * lookup survived extraction, and `staticint` is missing a space. Restore
 * the teardown sequence from the upstream driver source.
 */
/** * octep_iq_full_check() - check if a Tx queue is full. * * @iq: Octeon Tx queue data structure. * * Return: 0, if the Tx queue is not full. * 1, if the Tx queue is full.
 */ staticinlineint octep_iq_full_check(struct octep_iq *iq)
{ if (likely((IQ_INSTR_SPACE(iq)) >
OCTEP_WAKE_QUEUE_THRESHOLD)) return 0;
/* Stop the queue if unable to send */
netif_stop_subqueue(iq->netdev, iq->q_no);
/* Allow for pending updates in write index * from iq_process_completion in other cpus * to reflect, in case queue gets free * entries.
 */
smp_mb();
/* check again and restart the queue, in case NAPI has just freed * enough Tx ring entries.
 */ if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
iq->stats->restart_cnt++; return 0;
}
/* NOTE(review): octep_iq_full_check() is cut off here -- per its kernel-doc
 * it should end with `return 1;` and a closing brace, both missing from this
 * view; `staticinlineint` is also missing spaces. Restore from the upstream
 * driver source.
 */
/* NOTE(review): the lines from skb_tx_timestamp() down are the tail of a
 * different function (a transmit path: it references `skb`, `wi` and
 * `xmit_more`, none of which are declared here) whose start was lost in
 * extraction.
 */
skb_tx_timestamp(skb);
iq->fill_cnt++;
wi++;
iq->host_write_index = wi & iq->ring_size_mask;
/* octep_iq_full_check stops the queue and returns * true if so, in case the queue has become full * by inserting current packet. If so, we can * go ahead and ring doorbell.
 */ if (!octep_iq_full_check(iq) && xmit_more &&
iq->fill_cnt < iq->fill_threshold) return NETDEV_TX_OK;
/* Flush the hw descriptor before writing to doorbell */
wmb(); /* Ring Doorbell to notify the NIC of new packets */
writel(iq->fill_cnt, iq->doorbell_reg);
iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0; return NETDEV_TX_OK;
/**
 * octep_tx_timeout_task - work queue task to Handle Tx queue timeout.
 *
 * @work: pointer to Tx queue timeout work_struct
 *
 * Stop and start the device so that it frees up all queue resources
 * and restarts the queues, that potentially clears a Tx queue timeout
 * condition.
 **/
static void octep_tx_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						tx_timeout_task);
	struct net_device *netdev = oct->netdev;

	/* Hold RTNL so the interface state cannot change under us */
	rtnl_lock();
	if (netif_running(netdev)) {
		octep_stop(netdev);
		octep_open(netdev);
	}
	rtnl_unlock();
}
/** * octep_tx_timeout() - Handle Tx Queue timeout. * * @netdev: pointer to kernel network device. * @txqueue: Timed out Tx queue number. * * Schedule a work to handle Tx queue timeout.
 */ staticvoid octep_tx_timeout(struct net_device *netdev, unsignedint txqueue)
{ struct octep_device *oct = netdev_priv(netdev);
/* NOTE(review): function body is truncated here -- per the kernel-doc it
 * should queue oct->tx_timeout_task, but that call and the closing brace are
 * missing; `staticvoid` and `unsignedint` are also missing spaces. Restore
 * from the upstream driver source.
 */
/**
 * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
 *
 * @work: pointer to ctrl mbox work_struct
 *
 * Poll ctrl mbox message queue and handle control messages from firmware.
 **/
static void octep_ctrl_mbox_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						ctrl_mbox_task);

	octep_ctrl_net_recv_fw_messages(oct);
}
staticconstchar *octep_devid_to_str(struct octep_device *oct)
{ switch (oct->chip_id) { case OCTEP_PCI_DEVICE_ID_CN98_PF: return"CN98XX"; case OCTEP_PCI_DEVICE_ID_CN93_PF: return"CN93XX"; case OCTEP_PCI_DEVICE_ID_CNF95N_PF: return"CNF95N"; case OCTEP_PCI_DEVICE_ID_CN10KA_PF: return"CN10KA"; case OCTEP_PCI_DEVICE_ID_CNF10KA_PF: return"CNF10KA"; case OCTEP_PCI_DEVICE_ID_CNF10KB_PF: return"CNF10KB"; case OCTEP_PCI_DEVICE_ID_CN10KB_PF: return"CN10KB"; default: return"Unsupported";
}
}
/** * octep_device_setup() - Setup Octeon Device. * * @oct: Octeon device private data structure. * * Setup Octeon device hardware operations, configuration, etc ...
 */ int octep_device_setup(struct octep_device *oct)
{ struct pci_dev *pdev = oct->pdev; int i, ret;
/* allocate memory for oct->conf */
oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); if (!oct->conf) return -ENOMEM;
/* Map BAR regions */ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
oct->mmio[i].hw_addr =
ioremap(pci_resource_start(oct->pdev, i * 2),
pci_resource_len(oct->pdev, i * 2)); if (!oct->mmio[i].hw_addr) goto unmap_prev;
/* NOTE(review): function is truncated here -- the unmap_prev unwind label,
 * the rest of the setup sequence and the closing brace are missing from this
 * view. Restore from the upstream driver source.
 */
/** * octep_init_module() - Module initialization. * * create common resource for the driver and register PCI driver.
 */ staticint __init octep_init_module(void)
{ int ret;
/* work queue for all deferred tasks */
octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME); if (!octep_wq) {
pr_err("%s: Failed to create common workqueue\n",
OCTEP_DRV_NAME); return -ENOMEM;
}
ret = pci_register_driver(&octep_driver); if (ret < 0) {
pr_err("%s: Failed to register PCI driver; err=%d\n",
OCTEP_DRV_NAME, ret);
destroy_workqueue(octep_wq); return ret;
}
/* NOTE(review): function is truncated here -- the success path
 * (presumably `return 0;`) and the closing brace are missing from this view,
 * and `staticint` is missing a space. Restore from the upstream driver
 * source.
 */
/*
 * NOTE(review): the following German text is web-page boilerplate that leaked
 * into the file during extraction and is not part of the driver source
 * (English gloss: "The information on this web page was compiled carefully to
 * the best of our knowledge. However, neither completeness, correctness nor
 * quality of the provided information is guaranteed. Remark: the colored
 * syntax display and the measurement are still experimental."). It should be
 * removed from the source file.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */