/********************************************************************** * Author: Cavium, Inc. * * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/ #include <linux/pci.h> #include <linux/if_vlan.h> #include"liquidio_common.h" #include"octeon_droq.h" #include"octeon_iq.h" #include"response_manager.h" #include"octeon_device.h" #include"octeon_nic.h" #include"octeon_main.h" #include"octeon_network.h"
/* Kernel module metadata for the LiquidIO NIC core. */
/* NOTE(review): the author string ends with ", " and looks truncated;
 * the contact address from the file header (support@cavium.com) was
 * probably meant to follow.  Confirm against upstream before changing.
 */
MODULE_AUTHOR("Cavium Networks, ");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* Allocate per-IQ arrays holding the virtual and DMA base addresses of
 * the gather-list consistent memory (one entry per input queue).
 */
lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
				GFP_KERNEL);
lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
			       GFP_KERNEL);
/* If either allocation failed, tear down everything allocated so far.
 * Presumably lio_delete_glists() tolerates the partially-NULL arrays
 * (kfree(NULL) is a no-op) — confirm against its definition.
 */
if (!lio->glists_virt_base || !lio->glists_dma_base) {
	lio_delete_glists(lio);
	return -ENOMEM;
}

/* NOTE(review): the loop body is cut off by the chunk boundary below. */
for (i = 0; i < num_iqs; i++) {
	int numa_node = dev_to_node(&oct->pci_dev->dev);
/* Report completion of a firmware control command; the message printed
 * depends on which command (nctrl->ncmd.s.cmd) finished.
 * NOTE(review): this switch is a fragment — the enclosing function and the
 * declarations of 'mac', 'lio', 'oct' and 'netdev' are outside this chunk.
 */
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
case OCTNET_CMD_SET_MULTI_LIST:
case OCTNET_CMD_SET_UC_LIST:
	/* Nothing to report for these commands. */
	break;

case OCTNET_CMD_CHANGE_MACADDR:
	/* MAC bytes start 2 bytes into the first 64-bit user data word. */
	mac = ((u8 *)&nctrl->udd[0]) + 2;
	if (nctrl->ncmd.s.param1) {
		/* vfidx is 0 based, but vf_num (param1) is 1 based */
		int vfidx = nctrl->ncmd.s.param1 - 1;
		bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

		if (mac_is_admin_assigned)
			netif_info(lio, probe, lio->netdev,
				   "MAC Address %pM is configured for VF %d\n",
				   mac, vfidx);
	} else {
		netif_info(lio, probe, lio->netdev,
			   " MACAddr changed to %pM\n", mac);
	}
	break;

case OCTNET_CMD_GPIO_ACCESS:
	netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
	break;

case OCTNET_CMD_ID_ACTIVE:
	netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
	break;

case OCTNET_CMD_LRO_ENABLE:
	dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
	break;

case OCTNET_CMD_LRO_DISABLE:
	dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n", netdev->name);
	break;

case OCTNET_CMD_VERBOSE_ENABLE:
	dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
		 netdev->name);
	break;

case OCTNET_CMD_VERBOSE_DISABLE:
	dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
		 netdev->name);
	break;

case OCTNET_CMD_VLAN_FILTER_CTL:
	if (nctrl->ncmd.s.param1)
		dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
			 netdev->name);
	else
		dev_info(&oct->pci_dev->dev, "%s VLAN filter disabled\n",
			 netdev->name);
	break;
/* Tail of an enclosing function (start is outside this chunk): enable the
 * output (droq) queue and prime its packet credits.
 */
/* Enable the droq queues */
octeon_set_droq_pkt_op(oct, q_no, 1);
/* Send Credit for Octeon Output queues. Credits are always
 * sent after the output queue is enabled.
 */
writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
return ret_val;
}
/**
 * liquidio_push_packet - Routine to push packets arriving on Octeon
 *                        interface upto network layer.
 * @octeon_id: octeon device id.
 * @skbuff: skbuff struct to be passed to network layer.
 * @len: size of total data received.
 * @rh: Control header associated with the packet
 * @param: additional control data with the packet
 * @arg: farg registered in droq_ops
 *
 * NOTE(review): this function is truncated by the chunk boundary; 'lio',
 * 'oct' and the initialization of 'r_dh_off' are not visible here (upstream
 * presumably derives r_dh_off from rh->r_dh.len before the timestamp read —
 * confirm against the full file).
 */
static void	/* was "staticvoid" — missing space, invalid token */
liquidio_push_packet(u32 __maybe_unused octeon_id, void *skbuff,
		     u32 len, union octeon_rh *rh, void *param, void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
		container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	/* Do not proceed if the interface is not in RUNNING state. */
	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
		recv_buffer_free(skb);
		droq->stats.rx_dropped++;
		return;
	}

	if (oct->ptp_enable) {
		if (rh->r_dh.has_hwtstamp) {
			/* timestamp is included from the hardware at
			 * the beginning of the packet.
			 */
			if (ifstate_check(lio,
					  LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
				/* Nanoseconds are in the first 64-bits
				 * of the packet.
				 */
				memcpy(&ns, (skb->data + r_dh_off),
				       sizeof(ns));
				r_dh_off -= BYTES_PER_DHLEN_UNIT;
				shhwtstamps = skb_hwtstamps(skb);
				shhwtstamps->hwtstamp =
					ns_to_ktime(ns + lio->ptp_adjust);
			}
		}
	}

	/* Setting Encapsulation field on basis of status received
	 * from the firmware
	 */
	if (rh->r_dh.encap_on) {
		skb->encapsulation = 1;
		skb->csum_level = 1;
		droq->stats.rx_vxlan++;
	}

	/* inbound VLAN tag */
	if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    rh->r_dh.vlan) {
		u16 priority = rh->r_dh.priority;
		u16 vid = rh->r_dh.vlan;
/**
 * napi_schedule_wrapper - wrapper for calling napi_schedule
 * @param: parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)	/* was "staticvoid" — compile error */
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}
/**
 * liquidio_napi_drv_callback - callback when receive interrupt occurs
 *                              and we are in NAPI mode
 * @arg: pointer to octeon output queue
 *
 * NOTE(review): body is truncated by the chunk boundary — 'oct' is declared
 * but its assignment and the scheduling logic are not visible here.
 */
static void liquidio_napi_drv_callback(void *arg)	/* was "staticvoid" */
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();
/** * liquidio_napi_poll - Entry point for NAPI polling * @napi: NAPI structure * @budget: maximum number of items to process
*/ staticint liquidio_napi_poll(struct napi_struct *napi, int budget)
{ struct octeon_instr_queue *iq; struct octeon_device *oct; struct octeon_droq *droq; int tx_done = 0, iq_no; int work_done;
/* Flush the instruction queue */
iq = oct->instr_queue[iq_no]; if (iq) { /* TODO: move this check to inside octeon_flush_iq, * once check_db_timeout is removed
*/ if (atomic_read(&iq->instr_pending)) /* Process iq buffers with in the budget limits */
tx_done = octeon_flush_iq(oct, iq, budget); else
tx_done = 1; /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false.
*/ /* sub-queue status update */
lio_update_txq_status(oct, iq_no);
} else {
dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
__func__, iq_no);
}
#define MAX_REG_CNT 2000000U /* force enable interrupt if reg cnts are high to avoid wraparound */ if ((work_done < budget && tx_done) ||
(iq && iq->pkt_in_done >= MAX_REG_CNT) ||
(droq->pkt_count >= MAX_REG_CNT)) {
napi_complete_done(napi, work_done);
/**
 * liquidio_setup_io_queues - Setup input and output queues
 * @octeon_dev: octeon device
 * @ifidx: Interface index
 * @num_iqs: input io queue count
 * @num_oqs: output io queue count
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 *
 * NOTE(review): truncated fragment — 'lio' and 'netdev' are used below but
 * their assignments (presumably from GET_LIO()) are outside this chunk.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
			octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}
/* Fragment of an interrupt-handling path (enclosing function outside this
 * chunk): either hand the droq to NAPI or fall back to tasklet scheduling.
 */
if (droq->ops.poll_mode) {
	droq->ops.napi_fn(droq);
} else {
	if (ret & MSIX_PO_INT) {
		/* A VF should never see RX interrupts with poll mode off. */
		if (OCTEON_CN23XX_VF(oct))
			dev_err(&oct->pci_dev->dev,
				"should not come here should not get rx when poll mode = 0 for vf\n");
		tasklet_schedule(&oct_priv->droq_tasklet);
		return 1;
	}
	/* this will be flushed periodically by check iq db */
	if (ret & MSIX_PI_INT)
		return 0;
}
/**
 * octeon_setup_interrupt - Setup interrupt for octeon device
 * @oct: octeon device
 * @num_ioqs: number of queues
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 *
 * Return: 0 on success (return path not visible in this chunk), negative
 * errno on failure.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
			/* one non ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {	/* was "elseif" — compile error */
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		/* NOTE(review): upstream allocates oct->msix_entries and
		 * assigns msix_entries = oct->msix_entries around here; that
		 * code is missing from this chunk, so msix_entries below
		 * would be used uninitialized.  Confirm against full file.
		 */

		/*Assumption is that pf msix vectors start from pf srn to pf to
		 * trs and not from 0. if not change this code
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;
			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {	/* was "elseif" — compile error */
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}

		num_alloc_ioq_vectors = pci_enable_msix_range(
			oct->pci_dev, msix_entries,
			oct->num_msix_irqs,
			oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
				msix_entries[num_ioq_vectors].vector,
				liquidio_legacy_intr_handler, 0,
				aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			/* NOTE(review): upstream issues the per-vector
			 * request_irq() here and stores its result in irqret;
			 * that call is missing from this chunk, so the check
			 * below reads a stale irqret.  Confirm against the
			 * full file.
			 */
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Freeing the non-ioq irq vector here . */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);
				/* Unwind every ioq vector set up so far. */
				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask);
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);
/**
 * liquidio_change_mtu - Net device change_mtu
 * @netdev: network device
 * @new_mtu: the new max transmit unit size
 *
 * NOTE(review): truncated fragment — the allocation and setup of the soft
 * command 'sc' (and 'ncmd') happen before this point in the upstream
 * function and are not visible in this chunk.
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ret = 0;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;

	/* Firmware reported failure: release the command and bail out. */
	if (sc->sc_status) {
		WRITE_ONCE(sc->caller_is_done, true);
		return -EINVAL;
	}
/* Copy the firmware-reported RX (LRO) statistics out of the response
 * buffer into the driver's stats structure.
 * NOTE(review): fragment — 'rstats'/'rsp_rstats' and 'tstats'/'rsp_tstats'
 * are declared in the enclosing function, outside this chunk.
 */
/* Number of packets that are LROed */
rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
/* Number of octets that are LROed */
rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
/* Number of LRO packets formed */
rstats->fw_total_lro = rsp_rstats->fw_total_lro;
/* Number of times lRO of packet aborted */
rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
/* intrmod: packet forward rate */
rstats->fwd_rate = rsp_rstats->fwd_rate;

/* TX link-level stats */
tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
tstats->ctl_sent = rsp_tstats->ctl_sent;
/* Packets sent after one collision*/
tstats->one_collision_sent = rsp_tstats->one_collision_sent;
/* Packets sent after multiple collision*/
tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
/* Packets not sent due to max collisions */
tstats->max_collision_fail = rsp_tstats->max_collision_fail;
/* Packets not sent due to max deferrals */
tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
/* Accounts for over/under-run of buffers */
tstats->fifo_err = rsp_tstats->fifo_err;
tstats->runts = rsp_tstats->runts;
/* Total number of collisions detected */
tstats->total_collisions = rsp_tstats->total_collisions;
/* Decode the link-speed boot variable from the firmware response.
 * NOTE(review): fragment — 'var', 'resp' and 'oct' are declared in the
 * enclosing function, outside this chunk.
 */
var = be32_to_cpu((__force __be32)resp->speed);
oct->speed_setting = var;
if (var == 0xffff) {
	/* unable to access boot variables
	 * get the default value based on the NIC type
	 */
	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
		oct->no_speed_setting = 1;
		oct->speed_setting = 25;
	} else {
		oct->speed_setting = 10;
	}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.