/********************************************************************** * Author: Cavium, Inc. * * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <net/vxlan.h> #include"liquidio_common.h" #include"octeon_droq.h" #include"octeon_iq.h" #include"response_manager.h" #include"octeon_device.h" #include"octeon_nic.h" #include"octeon_main.h" #include"octeon_network.h" #include"cn23xx_vf_device.h"
MODULE_AUTHOR("Cavium Networks, ");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
/**
 * lio_wait_for_oq_pkts - wait for the output queues (DROQs) to drain
 * @oct: Octeon device whose output queues are polled
 *
 * Repeatedly counts packets still held by the hardware on every active
 * output queue and kicks the DROQ tasklet to process them, sleeping one
 * tick between passes, until a pass finds nothing pending or the retry
 * budget (MAX_IO_PENDING_PKT_COUNT) is exhausted.
 *
 * Return: always 0 — the per-pass counter is cleared before the loop can
 * exit, so callers only learn "done or gave up", not how many were left.
 */
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv = oct->priv;
	int budget = MAX_IO_PENDING_PKT_COUNT;
	int found = 0, still_pending;
	int q;

	do {
		still_pending = 0;

		/* Count packets the hardware still owns on each active DROQ. */
		for (q = 0; q < MAX_OCTEON_OUTPUT_QUEUES(oct); q++) {
			if (!(oct->io_qmask.oq & BIT_ULL(q)))
				continue;
			found += octeon_droq_check_hw_for_pkts(oct->droq[q]);
		}

		if (found > 0) {
			still_pending += found;
			/* Let the tasklet drain what this pass saw. */
			tasklet_schedule(&oct_priv->droq_tasklet);
		}

		found = 0;
		schedule_timeout_uninterruptible(1);

	} while (budget-- && still_pending);

	return found;
}
/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely
 * removed/reset/etc.
 * @oct: Pointer to Octeon device
 *
 * NOTE(review): this function is truncated in this copy of the file — the
 * per-IQ flush loop below is cut off mid-body and the closing brace is
 * missing.  Restore the remainder from the upstream driver before building.
 */
staticvoid pcierror_quiesce_device(struct octeon_device *oct)
{ int i;
/* Disable the input and output queues now. No more packets will
 * arrive from Octeon, but we should wait for all packet processing
 * to finish.
 */
/* To allow for in-flight requests */
schedule_timeout_uninterruptible(100);
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Force all requests waiting to be fetched by OCTEON to complete. */ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq;
if (!(oct->io_qmask.iq & BIT_ULL(i))) continue;
/* (truncated here in this copy) */
iq = oct->instr_queue[i];
/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 *
 * Marks the device as in-reset, detaches every netdev, quiesces pending
 * traffic, tears down the MSI-X vectors, clears the AER status, and
 * finally disables the PCI device.  The order of these steps matters and
 * is preserved exactly.
 */
static void stop_pci_io(struct octeon_device *oct)
{
	struct msix_entry *entries;
	int idx;

	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	for (idx = 0; idx < oct->ifcount; idx++)
		netif_device_detach(oct->props[idx].netdev);

	pcierror_quiesce_device(oct);

	if (oct->msix_on) {
		entries = (struct msix_entry *)oct->msix_entries;
		for (idx = 0; idx < oct->num_msix_irqs; idx++) {
			/* clear the affinity_cpumask */
			irq_set_affinity_hint(entries[idx].vector, NULL);
			free_irq(entries[idx].vector, &oct->ioq_vector[idx]);
		}
		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
		octeon_free_ioq_vector(oct);
	}

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);

	pci_disable_device(oct->pci_dev);
}
/** * liquidio_pcie_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected.
*/ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{ struct octeon_device *oct = pci_get_drvdata(pdev);
/* lio->linfo.link.s.mtu always contains max MTU of the lio interface. * this API is invoked only when new max-MTU of the interface is * less than current MTU.
*/
rtnl_lock();
dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
rtnl_unlock();
}
/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 *
 * Return: 0 on success, -1 if the workqueue could not be allocated.
 */
static int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

/**
 * cleanup_link_status_change_wq - Tear down the link status workqueue
 * @netdev: network device
 *
 * BUGFIX: in this copy of the file the tail of setup_link_status_change_wq()
 * and the header of this cleanup function had been fused into one body, so
 * the workqueue was destroyed immediately after being created and setup
 * never returned a value.  The two functions are restored here; the cleanup
 * entry point is already referenced by liquidio_destroy_nic_device().
 */
static void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}
/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.  When the firmware reports a new max
 * MTU, netdev->max_mtu is refreshed; if the current MTU now exceeds the new
 * maximum, the MTU reduction is deferred to the link-status workqueue.
 */
static void update_link_status(struct net_device *netdev,
			       union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	/* Nothing to do unless the interface is up and the status changed. */
	if (!lio->intf_open || lio->linfo.link.u64 == ls->u64)
		return;

	lio->linfo.link.u64 = ls->u64;

	if (lio->linfo.link.s.mtu != current_max_mtu) {
		dev_info(&oct->pci_dev->dev,
			 "Max MTU Changed from %d to %d\n",
			 current_max_mtu, lio->linfo.link.s.mtu);
		netdev->max_mtu = lio->linfo.link.s.mtu;
	}

	if (lio->linfo.link.s.mtu < netdev->mtu) {
		dev_warn(&oct->pci_dev->dev,
			 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
			 netdev->mtu, lio->linfo.link.s.mtu);
		queue_delayed_work(lio->link_status_wq.wq,
				   &lio->link_status_wq.wk.work, 0);
	}
}
switch (atomic_read(&oct->status)) { case OCT_DEV_RUNNING: case OCT_DEV_CORE_OK: /* No more instructions will be forwarded. */
atomic_set(&oct->status, OCT_DEV_IN_RESET);
oct->app_mode = CVM_DRV_INVALID_APP;
dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
lio_get_state_string(&oct->status));
schedule_timeout_uninterruptible(HZ / 10);
fallthrough; case OCT_DEV_HOST_OK: case OCT_DEV_IO_QUEUES_DONE: if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish.
*/
oct->fn_list.disable_io_queues(oct);
if (lio_wait_for_oq_pkts(oct))
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
/* Force all requests waiting to be fetched by OCTEON to * complete.
*/ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq;
if (!(oct->io_qmask.iq & BIT_ULL(i))) continue;
iq = oct->instr_queue[i];
fallthrough; case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
if (oct->msix_on) {
msix_entries = (struct msix_entry *)oct->msix_entries; for (i = 0; i < oct->num_msix_irqs; i++) { if (oct->ioq_vector[i].vector) {
irq_set_affinity_hint(
msix_entries[i].vector,
NULL);
free_irq(msix_entries[i].vector,
&oct->ioq_vector[i]);
oct->ioq_vector[i].vector = 0;
}
}
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
} /* Soft reset the octeon device before exiting */ if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
octeon_pci_flr(oct); else
cn23xx_vf_ask_pf_to_do_flr(oct);
fallthrough; case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
octeon_free_ioq_vector(oct);
fallthrough; case OCT_DEV_MBOX_SETUP_DONE:
oct->fn_list.free_mbox(oct);
fallthrough; case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE:
mdelay(100); for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue;
octeon_delete_droq(oct, i);
}
fallthrough; case OCT_DEV_RESP_LIST_INIT_DONE:
octeon_delete_response_list(oct);
fallthrough; case OCT_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue;
octeon_delete_instr_queue(oct, i);
}
fallthrough; case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
fallthrough; case OCT_DEV_DISPATCH_INIT_DONE:
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
fallthrough; case OCT_DEV_PCI_MAP_DONE:
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
fallthrough; case OCT_DEV_PCI_ENABLE_DONE: /* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
fallthrough; case OCT_DEV_BEGIN_STATE: /* Nothing to be done here either */ break;
}
tasklet_kill(&oct_priv->droq_tasklet);
}
/** * send_rx_ctrl_cmd - Send Rx control command * @lio: per-network private data * @start_stop: whether to start or stop
*/ staticint send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; struct octeon_soft_command *sc; union octnet_cmd *ncmd; int retval;
if (oct->props[lio->ifidx].rx_on == start_stop) return 0;
retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc);
} else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0); if (retval) return retval;
/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv = oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	/* BUGFIX: lio was declared but never initialized before being
	 * dereferenced below — restore the lookup from the netdev.
	 */
	lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		/* Revert DROQ 0 to interrupt-driven processing. */
		oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	cleanup_link_status_change_wq(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
/** * liquidio_stop_nic_module - Stop complete NIC functionality * @oct: octeon device
*/ staticint liquidio_stop_nic_module(struct octeon_device *oct)
{ struct lio *lio; int i, j;
dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); if (!oct->ifcount) {
dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); return 1;
}
/* This octeon device has been removed. Update the global * data structure to reflect this. Free the device structure.
*/
octeon_free_device_mem(oct_dev);
}
/** * octeon_pci_os_setup - PCI initialization for each Octeon device. * @oct: octeon device
*/ staticint octeon_pci_os_setup(struct octeon_device *oct)
{ #ifdef CONFIG_PCI_IOV /* setup PCI stuff first */ if (!oct->pci_dev->physfn)
octeon_pci_flr(oct); #endif
if (pci_enable_device(oct->pci_dev)) {
dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); return 1;
}
/* tell Octeon to stop forwarding packets to host */
ret = send_rx_ctrl_cmd(lio, 0); if (ret) return ret;
netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); /* Inform that netif carrier is down */
lio->intf_open = 0;
lio->linfo.link.s.link_up = 0;
netif_carrier_off(netdev);
lio->link_changes++;
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
stop_txqs(netdev);
/* Wait for any pending Rx descriptors */ if (lio_wait_for_clean_oq(oct))
netif_info(lio, rx_err, lio->netdev, "Proceeding with stop interface after partial RX desc processing\n");
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
oct->props[lio->ifidx].napi_enabled = 0;
oct->droq[0]->ops.poll_mode = 0;
tasklet_enable(&oct_priv->droq_tasklet);
}
cancel_delayed_work_sync(&lio->stats_wk.work);
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
return ret;
}
/** * get_new_flags - Converts a mask based on net device flags * @netdev: network device * * This routine generates a octnet_ifflags mask from the net device flags * received from the OS.
*/ staticenum octnet_ifflags get_new_flags(struct net_device *netdev)
{ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
if (netdev->flags & IFF_PROMISC)
f |= OCTNET_IFFLAG_PROMISC;
if (netdev->flags & IFF_ALLMULTI)
f |= OCTNET_IFFLAG_ALLMULTI;
if (netdev->flags & IFF_MULTICAST) {
f |= OCTNET_IFFLAG_MULTICAST;
/* Accept all multicast addresses if there are more than we * can handle
*/ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
f |= OCTNET_IFFLAG_ALLMULTI;
}
if (netdev->flags & IFF_BROADCAST)
f |= OCTNET_IFFLAG_BROADCAST;
/* copy all the addresses into the udd */
mac = &nctrl.udd[0];
netdev_for_each_uc_addr(ha, netdev) {
ether_addr_copy(((u8 *)mac) + 2, ha->addr);
mac++;
}
/* Create a ctrl pkt command to be sent to core app. */
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
nctrl.ncmd.s.param1 = get_new_flags(netdev);
nctrl.ncmd.s.param2 = mc_count;
nctrl.ncmd.s.more = mc_count;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* copy all the addresses into the udd */
mc = &nctrl.udd[0];
netdev_for_each_mc_addr(ha, netdev) {
*mc = 0;
ether_addr_copy(((u8 *)mc) + 2, ha->addr); /* no need to swap bytes */ if (++mc > &nctrl.udd[mc_count]) break;
}
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
/* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response.
*/
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
}
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) return -EFAULT;
switch (conf.tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: break; default: return -ERANGE;
}
switch (conf.rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE;
}
if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
/* send_nic_timestamp_pkt - Send a data packet that will be timestamped * @oct: octeon device * @ndata: pointer to network data * @finfo: pointer to private network data
*/ staticint send_nic_timestamp_pkt(struct octeon_device *oct, struct octnic_data_pkt *ndata, struct octnet_buf_free_info *finfo, int xmit_more)
{ struct octeon_soft_command *sc; int ring_doorbell; struct lio *lio; int retval;
u32 len;
/** * liquidio_xmit - Transmit networks packets to the Octeon interface * @skb: skbuff struct to be passed to network layer. * @netdev: pointer to network device * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{ struct octnet_buf_free_info *finfo; union octnic_cmd_setup cmdsetup; struct octnic_data_pkt ndata; struct octeon_instr_irh *irh; struct oct_iq_stats *stats; struct octeon_device *oct; int q_idx = 0, iq_no = 0; union tx_info *tx_info; int xmit_more = 0; struct lio *lio; int status = 0;
u64 dptr = 0;
u32 tag = 0; int j;
lio = GET_LIO(netdev);
oct = lio->oct_dev;
q_idx = skb_iq(lio->oct_dev, skb);
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
stats = &oct->instr_queue[iq_no]->stats;
/* Check for all conditions in which the current packet cannot be * transmitted.
*/ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
(!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
lio->linfo.link.s.link_up); goto lio_xmit_failed;
}
/* Use space in skb->cb to store info used to unmap and * free the buffers.
*/
finfo = (struct octnet_buf_free_info *)skb->cb;
finfo->lio = lio;
finfo->skb = skb;
finfo->sc = NULL;
/* Prepare the attributes for the data to be passed to OSI. */
memset(&ndata, 0, sizeof(struct octnic_data_pkt));
ndata.buf = finfo;
ndata.q_no = iq_no;
if (octnet_iq_is_full(oct, ndata.q_no)) { /* defer sending if queue is full */
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
ndata.q_no);
stats->tx_iq_busy++; return NETDEV_TX_BUSY;
}
if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
}
/* HW insert VLAN tag */ if (skb_vlan_tag_present(skb)) {
irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
}
xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); if (status == IQ_SEND_FAILED) goto lio_xmit_failed;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
ret); if (ret > 0)
ret = -EIO;
} return ret;
}
/* Disable LRO if RXCSUM is off */ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
(lio->dev_capability & NETIF_F_LRO))
request &= ~NETIF_F_LRO;
return request;
}
/** \brief Net device set features * @param netdev pointer to network device * @param features features to enable/disable
*/ staticint liquidio_set_features(struct net_device *netdev,
netdev_features_t features)
{ struct lio *lio = netdev_priv(netdev);
if (!((netdev->features ^ features) & NETIF_F_LRO)) return 0;
for (i = 0; i < oct->ifcount; i++) { if (oct->props[i].gmxport == gmxport) {
update_link_status(oct->props[i].netdev, ls); break;
}
}
nic_info_err: for (i = 0; i < recv_pkt->buffer_count; i++)
recv_buffer_free(recv_pkt->buffer_ptr[i]);
octeon_free_recv_info(recv_info); return 0;
}
/** * setup_nic_devices - Setup network interfaces * @octeon_dev: octeon device * * Called during init time for each device. It assumes the NIC * is already up and running. The link information for each * interface is passed in link_info.
*/ staticint setup_nic_devices(struct octeon_device *octeon_dev)
{ int retval, num_iqueues, num_oqueues;
u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; struct octdev_props *props; struct net_device *netdev; struct lio_version *vdata; struct lio *lio = NULL;
u8 mac[ETH_ALEN], i, j;
u32 ifidx_or_pfnum;
ifidx_or_pfnum = octeon_dev->pf_num;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
lio_nic_info, octeon_dev);
/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. * They are handled directly.
*/
octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
free_netbuf);
retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) {
dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */
octeon_free_soft_command(octeon_dev, sc); return(-EIO);
}
/* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); if (retval) return retval;
/* Point to the properties for octeon device to which this * interface belongs.
*/
lio->oct_dev = octeon_dev;
lio->octprops = props;
lio->netdev = netdev;
dev_dbg(&octeon_dev->pci_dev->dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
/* 64-bit swap required on LE machines */
octeon_swap_8B_data(&lio->linfo.hw_addr, 1); for (j = 0; j < ETH_ALEN; j++)
mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
/* Copy MAC Address to OS network device structure */
eth_hw_addr_set(netdev, mac);
if (liquidio_setup_io_queues(octeon_dev, i,
lio->linfo.num_txpciq,
lio->linfo.num_rxpciq)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_free;
}
ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
/* For VFs, enable Octeon device interrupts here, * as this is contingent upon IO queue setup
*/
octeon_dev->fn_list.enable_interrupt(octeon_dev,
OCTEON_ALL_INTR);
/* By default all interfaces on a single Octeon uses the same * tx and rx queues
*/
lio->txq = lio->linfo.txpciq[0].s.q_no;
lio->rxq = lio->linfo.rxpciq[0].s.q_no;
if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_free;
}
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev); if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; else
octeon_dev->priv_flags = 0x0;
if (netdev->features & NETIF_F_LRO)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
if (setup_link_status_change_wq(netdev)) goto setup_nic_dev_free;
if (setup_rx_oom_poll_fn(netdev)) goto setup_nic_dev_free;
/* Register the network device with the OS */ if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); goto setup_nic_dev_free;
}
dev_dbg(&octeon_dev->pci_dev->dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
netif_carrier_off(netdev);
lio->link_changes++;
ifstate_set(lio, LIO_IFSTATE_REGISTERED);
/* Sending command to firmware to enable Rx checksum offload * by default at the time of setup of Liquidio driver for * this device
*/
liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_ENABLE);
liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
OCTNET_CMD_TXCSUM_ENABLE);
/**
 * liquidio_init_nic_module - initialize the NIC
 * @oct: octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 *
 * NOTE(review): truncated in this copy — the success path (returning
 * retval) and the octnet_init_failure error label are missing below.
 * Restore the tail from the upstream driver; `lio` is also unused here,
 * presumably used in the lost portion.
 */
staticint liquidio_init_nic_module(struct octeon_device *oct)
{ int num_nic_ports = 1; int i, retval = 0;
/* only default iq and oq were initialized
 * initialize the rest as well run port_config command for each port
 */
oct->ifcount = num_nic_ports;
memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
for (i = 0; i < MAX_OCTEON_LINKS; i++)
oct->props[i].gmxport = -1;
retval = setup_nic_devices(oct); if (retval) {
dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); goto octnet_init_failure;
}
/** * octeon_device_init - Device initialization for each Octeon device that is probed * @oct: octeon device
*/ staticint octeon_device_init(struct octeon_device *oct)
{
u32 rev_id; int j;
atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
/* Enable access to the octeon device and make its DMA capability * known to the OS.
*/ if (octeon_pci_os_setup(oct)) return 1;
atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
/* Initialize soft command buffer pool */ if (octeon_setup_sc_buffer_pool(oct)) {
dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); return 1;
}
atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
/* Setup the data structures that manage this Octeon's Input queues. */ if (octeon_setup_instr_queues(oct)) {
dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); return 1;
}
atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
/* Initialize lists to manage the requests of different types that * arrive from user & kernel applications for this octeon device.
*/ if (octeon_setup_response_list(oct)) {
dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); return 1;
}
atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
/* Setup the interrupt handler and record the INT SUM register address*/ if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) return 1;
atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
/* *************************************************************** * The interrupts need to be enabled for the PF<-->VF handshake. * They are [re]-enabled after the PF<-->VF handshake so that the * correct OQ tick value is used (i.e. the value retrieved from * the PF as part of the handshake).
*/
/* Enable the input and output queues for this Octeon device */ if (oct->fn_list.enable_io_queues(oct)) {
dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); return 1;
}
atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
atomic_set(&oct->status, OCT_DEV_HOST_OK);
/* Send Credit for Octeon Output queues. Credits are always sent after * the output queue is enabled.
*/ for (j = 0; j < oct->num_oqs; j++)
writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
/* Packets can start arriving on the output queues from this point. */
atomic_set(&oct->status, OCT_DEV_CORE_OK);
atomic_set(&oct->status, OCT_DEV_RUNNING);
if (liquidio_init_nic_module(oct)) return 1;
return 0;
}
/*
 * NOTE(review): the remainder of this file was cut off by the extraction
 * tool.  The lines that followed here ("--> maximum size reached",
 * "Messung V0.5", and a German web-viewer disclaimer) were viewer output,
 * not source code, and have been replaced by this comment.
 */