/********************************************************************** * Author: Cavium, Inc. * * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/firmware.h> #include <net/vxlan.h> #include <linux/kthread.h> #include"liquidio_common.h" #include"octeon_droq.h" #include"octeon_iq.h" #include"response_manager.h" #include"octeon_device.h" #include"octeon_nic.h" #include"octeon_main.h" #include"octeon_network.h" #include"cn66xx_regs.h" #include"cn66xx_device.h" #include"cn68xx_device.h" #include"cn23xx_pf_device.h" #include"liquidio_image.h" #include"lio_vf_rep.h"
/* Number of milliseconds to wait for DDR initialization before polling the
 * Octeon console.  0 means: do not start checking until ddr_timeout is set
 * to a non-zero value at runtime (it is writable via sysfs, mode 0644).
 */
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout, "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

/* Firmware image type to load; "auto" prefers firmware already in flash,
 * falling back to the "nic" image.  Read-only after load (mode 0444).
 */
static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

/* Bit N of this mask redirects debug output of Octeon console N to syslog.
 * Declared int (not u32) so the type matches module_param's 'int' type
 * check (param_check_int requires an int *).
 */
static int console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
/**
 * octeon_console_debug_enabled - determines if a given console has debug enabled.
 * @console: zero-based index of the console to check
 *
 * Tests the corresponding bit of the console_bitmask module parameter.
 *
 * Return: 1 if debug output for @console is redirected to syslog, 0 otherwise.
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
/* Polling interval for determining when NIC application is alive */ #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
/* runtime link query interval */ #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 /* update localtime to octeon firmware every 60 seconds. * make firmware to use same time reference, so that it will be easy to * correlate firmware logged events/errors with host events, for debugging.
*/ #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
/* time to wait for possible in-flight requests in milliseconds */ #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
/* Octeon device properties to be used by the NIC module. * Each octeon device in the system will be represented * by this structure in the NIC module.
*/
if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { /* set time and cnt interrupt thresholds for this DROQ * for NAPI
*/ int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue;
pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
} if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
tasklet_schedule(&oct_priv->droq_tasklet);
}
pkt_cnt = 0;
schedule_timeout_uninterruptible(1);
} while (retry-- && pending_pkts);
return pkt_cnt;
}
/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 *
 * Clears the input- and output-queue enable bits directly in the SLI CSRs.
 * Only implemented for CN66XX/CN68XX chips; other chip models are left
 * untouched by this helper.
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}
/** * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc * @oct: Pointer to Octeon device
*/ staticinlinevoid pcierror_quiesce_device(struct octeon_device *oct)
{ int i;
/* Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet processing * to finish.
*/
force_io_queues_off(oct);
/* To allow for in-flight requests */
schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Force all requests waiting to be fetched by OCTEON to complete. */ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq;
if (!(oct->io_qmask.iq & BIT_ULL(i))) continue;
iq = oct->instr_queue[i];
/* NOTE(review): the function body appears truncated at this point - the
 * remainder of the per-IQ flush loop and the function's closing brace are
 * missing from this chunk.  Recover the full body from the original driver
 * source before building.
 */
/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 *
 * Marks the device as being in reset, releases its interrupt line (and
 * disables MSI if it was enabled), then clears any uncorrectable AER error
 * status so the device can be detached cleanly.
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/** * liquidio_pcie_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected.
*/ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{ struct octeon_device *oct = pci_get_drvdata(pdev);
/* NOTE(review): 'oct' and 'state' are unused in the visible body.  The
 * handling for non-fatal errors (which presumably inspected 'state' and
 * quiesced/stopped IO on 'oct') appears to have been lost from this chunk -
 * verify against the complete driver source before relying on this.
 */
/* Always return a DISCONNECT. There is no support for recovery but only * for a clean shutdown.
*/ return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * liquidio_pcie_mmio_enabled - MMIO-access-re-enabled AER callback
 * @pdev: Pointer to PCI device
 *
 * Not expected to be reached: error_detected() always reports DISCONNECT,
 * so the AER core never asks this driver to recover.  Report RECOVERED as
 * the conservative answer in case it is invoked anyway.
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Not expected to be reached: error_detected() always reports DISCONNECT,
 * so the AER core never requests a slot reset for this driver.  Report
 * RECOVERED as the conservative answer in case it is invoked anyway.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * Invoked by the error-recovery core once normal operation may resume.
 * This driver has nothing to restart at that point, so the callback is a
 * deliberate no-op.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
}
/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 *
 * Walks every active sub-queue and wakes any queue the stack had stopped
 * whose underlying instruction queue has room again, bumping the
 * tx_restart counter for each.
 *
 * Return: number of sub-queues woken up (0 if all are still full).
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		/* Map the sub-queue onto its instruction queue number. */
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	/* NOTE(review): removed an orphaned rtnl_lock()/dev_set_mtu()
	 * fragment that had been fused into this function.  It belongs to
	 * the max-MTU-shrink path of the link-status work handler; running
	 * it unconditionally here would have reset the device MTU on every
	 * queue check.  The missing 'return ret_val' is also restored.
	 */
	return ret_val;
}
/**
 * setup_link_status_change_wq - Sets up the link/mtu status change work
 * @netdev: network device
 *
 * Allocates the "link-status" workqueue and prepares the delayed work item
 * that runs octnet_link_status_change, stashing @netdev's lio context in
 * the work's ctxptr.
 *
 * Return: 0 on success, -1 if the workqueue could not be allocated.
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	/* NOTE(review): dropped a fused fragment that cancelled and
	 * destroyed the workqueue immediately after creating it - that code
	 * is the body of the matching cleanup helper, not part of setup.
	 * The missing success 'return 0' is restored.
	 */
	return 0;
}
/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status: toggles the carrier and Tx queues on
 * a link transition, publishes a changed max MTU, and schedules the
 * link-status work to shrink the current MTU if it now exceeds the maximum.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	/* Latch the new status: every check below reads lio->linfo.link.
	 * Without this store the function would compare the stale state
	 * with itself and the link/mtu updates would never fire.
	 */
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev, "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			/* dev_set_mtu needs rtnl; defer to the work item. */
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}
/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct it's time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	/* Firmware expects big-endian 8-byte words. */
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* Command was queued; let the completion path free it. */
		WRITE_ONCE(sc->caller_is_done, true);
	}
}
/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 *
 * Allocates the "update-octeon-time" workqueue, binds the delayed work item
 * to lio_sync_octeon_time and queues the first run after
 * LIO_SYNC_OCTEON_TIME_INTERVAL_MS.
 *
 * Return: 0 on success, -1 if the workqueue could not be allocated.
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}
/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.  Safe to call even if setup failed (wq NULL).
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
while (!kthread_should_stop()) { /* sleep for a couple of seconds so that we don't hog the CPU */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(2000));
if (oct_dev->octeon_id == 0) /* first LiquidIO NIC is detected */
complete(&first_stage);
if (octeon_device_init(oct_dev)) {
complete(&hs->init);
liquidio_remove(pdev); return -ENOMEM;
}
if (OCTEON_CN23XX_PF(oct_dev)) {
u8 bus, device, function;
if (atomic_read(oct_dev->adapter_refcount) == 1) { /* Each NIC gets one watchdog kernel thread. The first * PF (of each NIC) that gets pci_driver->probe()'d * creates that thread.
*/
bus = pdev->bus->number;
device = PCI_SLOT(pdev->devfn);
function = PCI_FUNC(pdev->devfn);
oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
oct_dev, "liowd/%02hhx:%02hhx.%hhx",
bus, device, function); if (IS_ERR(oct_dev->watchdog_task)) {
oct_dev->watchdog_task = NULL;
dev_err(&oct_dev->pci_dev->dev, "failed to create kernel_thread\n");
liquidio_remove(pdev); return -1;
}
}
}
oct_dev->rx_pause = 1;
oct_dev->tx_pause = 1;
dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
switch (atomic_read(&oct->status)) { case OCT_DEV_RUNNING: case OCT_DEV_CORE_OK:
/* No more instructions will be forwarded. */
atomic_set(&oct->status, OCT_DEV_IN_RESET);
oct->app_mode = CVM_DRV_INVALID_APP;
dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
lio_get_state_string(&oct->status));
schedule_timeout_uninterruptible(HZ / 10);
fallthrough; case OCT_DEV_HOST_OK:
case OCT_DEV_CONSOLE_INIT_DONE: /* Remove any consoles */
octeon_remove_consoles(oct);
fallthrough; case OCT_DEV_IO_QUEUES_DONE: if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish.
*/
oct->fn_list.disable_io_queues(oct);
if (lio_wait_for_oq_pkts(oct))
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
/* Force all requests waiting to be fetched by OCTEON to * complete.
*/ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq;
if (!(oct->io_qmask.iq & BIT_ULL(i))) continue;
iq = oct->instr_queue[i];
fallthrough; case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: if (OCTEON_CN23XX_PF(oct))
octeon_free_ioq_vector(oct);
fallthrough; case OCT_DEV_MBOX_SETUP_DONE: if (OCTEON_CN23XX_PF(oct))
oct->fn_list.free_mbox(oct);
fallthrough; case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: /* Wait for any pending operations */
mdelay(100); for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue;
octeon_delete_droq(oct, i);
}
/* Force any pending handshakes to complete */ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
hs = &handshake[i];
fallthrough; case OCT_DEV_RESP_LIST_INIT_DONE:
octeon_delete_response_list(oct);
fallthrough; case OCT_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue;
octeon_delete_instr_queue(oct, i);
} #ifdef CONFIG_PCI_IOV if (oct->sriov_info.sriov_enabled)
pci_disable_sriov(oct->pci_dev); #endif
fallthrough; case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
fallthrough; case OCT_DEV_DISPATCH_INIT_DONE:
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
fallthrough; case OCT_DEV_PCI_MAP_DONE:
refcount = octeon_deregister_device(oct);
/* Soft reset the octeon device before exiting. * However, if fw was loaded from card (i.e. autoboot), * perform an FLR instead. * Implementation note: only soft-reset the device * if it is a CN6XXX OR the LAST CN23XX device.
*/ if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
octeon_pci_flr(oct); elseif (OCTEON_CN6XXX(oct) || !refcount)
oct->fn_list.soft_reset(oct);
retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc);
} else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0); if (retval) return retval;
/** * liquidio_destroy_nic_device - Destroy NIC device interface * @oct: octeon device * @ifidx: which interface to destroy * * Cleanup associated with each interface for an Octeon device when NIC * module is being unloaded or if initialization fails during load.
*/ staticvoid liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{ struct net_device *netdev = oct->props[ifidx].netdev; struct octeon_device_priv *oct_priv = oct->priv; struct napi_struct *napi, *n; struct lio *lio;
if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
__func__, ifidx); return;
}
/* NOTE(review): the body below is garbled - after the NULL check it jumps
 * straight to freeing 'oct_dev', an identifier not declared in this
 * function (the locals 'oct_priv', 'napi', 'n' and 'lio' are also never
 * used).  The per-interface teardown appears lost and this tail looks like
 * it was fused in from a different (device-level) teardown function -
 * recover the full body from the original driver source.
 */
/* This octeon device has been removed. Update the global * data structure to reflect this. Free the device structure.
*/
octeon_free_device_mem(oct_dev);
}
/**
 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space
 * @oct: octeon device
 *
 * Reads the PCI device ID from config space and dispatches to the
 * chip-specific setup routine for CN66XX/CN68XX/CN23XX-PF parts.
 *
 * Return: 0 on success; non-zero for an unknown device or a chip-specific
 * setup failure.
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;

	/* Populate dev_id/rev_id from PCI config space (vendor/device ID at
	 * offset 0x00, revision in the low byte of offset 0x08); the switch
	 * below otherwise dispatches on an uninitialized value.
	 */
	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		break;

	default:
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	return ret;
}
/** * octeon_pci_os_setup - PCI initialization for each Octeon device. * @oct: octeon device
*/ staticint octeon_pci_os_setup(struct octeon_device *oct)
{ /* setup PCI stuff first */ if (pci_enable_device(oct->pci_dev)) {
dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); return 1;
}
/* NOTE(review): the function is unterminated here - the remainder
 * (presumably DMA-mask configuration, bus-mastering enable and the
 * success return) and the closing brace are missing from this chunk.
 */
/** * liquidio_ptp_adjfine - Adjust ptp frequency * @ptp: PTP clock info * @scaled_ppm: how much to adjust by, in scaled parts-per-million * * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/ staticint liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{ struct lio *lio = container_of(ptp, struct lio, ptp_info); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
u64 comp, delta; unsignedlong flags; bool neg_adj = false;
/* Work with a positive magnitude; remember the sign separately. */
if (ppb < 0) {
neg_adj = true;
ppb = -ppb;
}
/* The hardware adds the clock compensation value to the * PTP clock on every coprocessor clock cycle, so we * compute the delta in terms of coprocessor clocks.
*/
delta = (u64)ppb << 32;
do_div(delta, oct->coproc_clock_rate);
/* NOTE(review): truncated here - 'comp', 'flags' and 'neg_adj' are
 * computed but never consumed in the visible body, and there is no
 * register update, return statement or closing brace.  The part that
 * applies 'delta' (with sign) to the compensation register is missing
 * from this chunk.
 */
/* Tell Octeon that nic interface is down. */
ret = send_rx_ctrl_cmd(lio, 0); if (ret) return ret;
if (OCTEON_CN23XX_PF(oct)) { if (!oct->msix_on)
cleanup_tx_poll_fn(netdev);
} else {
cleanup_tx_poll_fn(netdev);
}
cancel_delayed_work_sync(&lio->stats_wk.work);
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
lio->ptp_clock = NULL;
}
/* Wait for any pending Rx descriptors */ if (lio_wait_for_clean_oq(oct))
netif_info(lio, rx_err, lio->netdev, "Proceeding with stop interface after partial RX desc processing\n");
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
oct->props[lio->ifidx].napi_enabled = 0;
if (OCTEON_CN23XX_PF(oct))
oct->droq[0]->ops.poll_mode = 0;
tasklet_enable(&oct_priv->droq_tasklet);
}
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
return ret;
}
/** * get_new_flags - Converts a mask based on net device flags * @netdev: network device * * This routine generates a octnet_ifflags mask from the net device flags * received from the OS.
*/ staticinlineenum octnet_ifflags get_new_flags(struct net_device *netdev)
{ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
if (netdev->flags & IFF_PROMISC)
f |= OCTNET_IFFLAG_PROMISC;
if (netdev->flags & IFF_ALLMULTI)
f |= OCTNET_IFFLAG_ALLMULTI;
if (netdev->flags & IFF_MULTICAST) {
f |= OCTNET_IFFLAG_MULTICAST;
/* Accept all multicast addresses if there are more than we * can handle
*/ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
f |= OCTNET_IFFLAG_ALLMULTI;
}
if (netdev->flags & IFF_BROADCAST)
f |= OCTNET_IFFLAG_BROADCAST;
/* Create a ctrl pkt command to be sent to core app. */
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
nctrl.ncmd.s.param1 = get_new_flags(netdev);
nctrl.ncmd.s.param2 = mc_count;
nctrl.ncmd.s.more = mc_count;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* copy all the addresses into the udd */
mc = &nctrl.udd[0];
netdev_for_each_mc_addr(ha, netdev) {
*mc = 0;
memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); /* no need to swap bytes */
if (++mc > &nctrl.udd[mc_count]) break;
}
/* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response.
*/
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
}
}
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) return -EFAULT;
switch (conf.tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: break; default: return -ERANGE;
}
switch (conf.rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE;
}
if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
/** * send_nic_timestamp_pkt - Send a data packet that will be timestamped * @oct: octeon device * @ndata: pointer to network data * @finfo: pointer to private network data * @xmit_more: more is coming
*/ staticinlineint send_nic_timestamp_pkt(struct octeon_device *oct, struct octnic_data_pkt *ndata, struct octnet_buf_free_info *finfo, int xmit_more)
{ int retval; struct octeon_soft_command *sc; struct lio *lio; int ring_doorbell;
u32 len;
/* NOTE(review): 'sc' is dereferenced below before any assignment - the
 * soft-command allocation/preparation that must populate it appears to
 * have been lost from this chunk, as has everything after the length
 * computation (send, doorbell, return, closing brace).
 */
if (OCTEON_CN23XX_PF(oct))
len = (u32)((struct octeon_instr_ih3 *)
(&sc->cmd.cmd3.ih3))->dlengsz; else
len = (u32)((struct octeon_instr_ih2 *)
(&sc->cmd.cmd2.ih2))->dlengsz;
/** * liquidio_xmit - Transmit networks packets to the Octeon interface * @skb: skbuff struct to be passed to network layer. * @netdev: pointer to network device * * Return: whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{ struct lio *lio; struct octnet_buf_free_info *finfo; union octnic_cmd_setup cmdsetup; struct octnic_data_pkt ndata; struct octeon_device *oct; struct oct_iq_stats *stats; struct octeon_instr_irh *irh; union tx_info *tx_info; int status = 0; int q_idx = 0, iq_no = 0; int j, xmit_more = 0;
u64 dptr = 0;
u32 tag = 0;
lio = GET_LIO(netdev);
oct = lio->oct_dev;
/* Pick the instruction queue backing this skb's sub-queue. */
q_idx = skb_iq(oct, skb);
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
stats = &oct->instr_queue[iq_no]->stats;
/* Check for all conditions in which the current packet cannot be * transmitted.
*/ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
(!lio->linfo.link.s.link_up) ||
(skb->len <= 0)) {
netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
lio->linfo.link.s.link_up); goto lio_xmit_failed;
}
/* Use space in skb->cb to store info used to unmap and * free the buffers.
*/
finfo = (struct octnet_buf_free_info *)skb->cb;
finfo->lio = lio;
finfo->skb = skb;
finfo->sc = NULL;
/* Prepare the attributes for the data to be passed to OSI. */
memset(&ndata, 0, sizeof(struct octnic_data_pkt));
ndata.buf = (void *)finfo;
ndata.q_no = iq_no;
if (octnet_iq_is_full(oct, ndata.q_no)) { /* defer sending if queue is full */
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
ndata.q_no);
stats->tx_iq_busy++; return NETDEV_TX_BUSY;
}
/* NOTE(review): truncated here - the command setup, DMA mapping, the
 * actual send, the 'lio_xmit_failed' label targeted by the goto above,
 * and the function's closing brace are all missing from this chunk.
 * Recover the full body from the original driver source before building.
 */
/* NOTE(review): extraction residue removed here - the trailing lines were a
 * German website disclaimer ("information compiled to the best of our
 * knowledge; no guarantee of completeness, correctness or quality; syntax
 * colouring and measurement are still experimental"), not driver source.
 */