/* * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet * driver for Linux. * * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

/* Selected interrupt scheme; writable at runtime via sysfs (mode 0644). */
static int msi = MSI_DEFAULT;
module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
/*
 * Fundamental constants.
 * ======================
 */
enum {
	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queue
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses.  And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused.  See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
/*
 * Global driver state.
 * ====================
 */

/* Root of our /sys/kernel/debug/cxgb4vf directory; one child per adapter. */
static struct dentry *cxgb4vf_debugfs_root;
/* * OS "Callback" functions. * ========================
*/
/* * The link status has changed on the indicated "port" (Virtual Interface).
*/ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{ struct net_device *dev = adapter->port[pidx];
/* * If the port is disabled or the current recorded "link up" * status matches the new status, just return.
*/ if (!netif_running(dev) || link_ok == netif_carrier_ok(dev)) return;
/* * Tell the OS that the link status has changed and print a short * informative message on the console about the event.
*/ if (link_ok) { constchar *s; constchar *fc; conststruct port_info *pi = netdev_priv(dev);
netif_carrier_on(dev);
switch (pi->link_cfg.speed) { case 100:
s = "100Mbps"; break; case 1000:
s = "1Gbps"; break; case 10000:
s = "10Gbps"; break; case 25000:
s = "25Gbps"; break; case 40000:
s = "40Gbps"; break; case 100000:
s = "100Gbps"; break;
default:
s = "unknown"; break;
}
switch ((int)pi->link_cfg.fc) { case PAUSE_RX:
fc = "RX"; break;
/* Calculate the hash vector for the updated list and program it */
list_for_each_entry(entry, &adapter->mac_hlist, list) {
ucast |= is_unicast_ether_addr(entry->addr);
vec |= (1ULL << hash_mac_addr(entry->addr));
} return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}
/**
 *	cxgb4vf_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persistent: whether a new MAC allocation should be persistent
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0.  In the latter case the address is added persistently
 *	if @persistent is %true.
 *	Addresses are programmed to the hash region if the TCAM runs out of
 *	entries.
 */
static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
			      int *tcam_idx, const u8 *addr, bool persistent)
{
	struct hash_mac_addr *new_entry, *entry;
	struct adapter *adapter = pi->adapter;
	int ret;

	ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
	/* We ran out of TCAM entries.  Try programming the hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4vf_set_addr_hash(pi);
	} else if (ret >= 0) {
		/* Success: remember the TCAM index the firmware assigned. */
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}
/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.  Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0)
		ret = cxgb4vf_change_mac(pi, pi->viid,
					 &pi->xact_addr_filt,
					 dev->dev_addr, true);

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_pi(pi->adapter, pi, true, true);

	return ret;
}
/* * Name the MSI-X interrupts.
*/ staticvoid name_msix_vecs(struct adapter *adapter)
{ int namelen = sizeof(adapter->msix_info[0].desc) - 1; int pidx;
/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}
/* * Enable NAPI scheduling and interrupt generation for all Receive Queues.
*/ staticvoid enable_rx(struct adapter *adapter)
{ int rxq; struct sge *s = &adapter->sge;
/* * The interrupt queue doesn't use NAPI so we do the 0-increment of * its Going To Sleep register here to get it started.
*/ if (adapter->flags & CXGB4VF_USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC_V(0) |
SEINTARM_V(s->intrq.intr_params) |
INGRESSQID_V(s->intrq.cntxt_id));
}
/* * Wait until all NAPI handlers are descheduled.
*/ staticvoid quiesce_rx(struct adapter *adapter)
{ struct sge *s = &adapter->sge; int rxq;
switch (opcode) { case CPL_FW6_MSG: { /* * We've received an asynchronous message from the firmware.
*/ conststruct cpl_fw6_msg *fw_msg = cpl; if (fw_msg->type == FW6_TYPE_CMD_RPL)
t4vf_handle_fw_rpl(adapter, fw_msg->data); break;
}
case CPL_FW4_MSG: { /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
*/ conststruct cpl_sge_egr_update *p = (void *)(rsp + 3);
opcode = CPL_OPCODE_G(ntohl(p->opcode_qid)); if (opcode != CPL_SGE_EGR_UPDATE) {
dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
, opcode); break;
}
cpl = (void *)p;
}
fallthrough;
case CPL_SGE_EGR_UPDATE: { /* * We've received an Egress Queue Status Update message. We * get these, if the SGE is configured to send these when the * firmware passes certain points in processing our TX * Ethernet Queue or if we make an explicit request for one. * We use these updates to determine when we may need to * restart a TX Ethernet Queue which was stopped for lack of * free TX Queue Descriptors ...
*/ conststruct cpl_sge_egr_update *p = cpl; unsignedint qid = EGR_QID_G(be32_to_cpu(p->opcode_qid)); struct sge *s = &adapter->sge; struct sge_txq *tq; struct sge_eth_txq *txq; unsignedint eq_idx;
/* * Perform sanity checking on the Queue ID to make sure it * really refers to one of our TX Ethernet Egress Queues which * is active and matches the queue's ID. None of these error * conditions should ever happen so we may want to either make * them fatal and/or conditionalized under DEBUG.
*/
eq_idx = EQ_IDX(s, qid); if (unlikely(eq_idx >= MAX_EGRQ)) {
dev_err(adapter->pdev_dev, "Egress Update QID %d out of range\n", qid); break;
}
tq = s->egr_map[eq_idx]; if (unlikely(tq == NULL)) {
dev_err(adapter->pdev_dev, "Egress Update QID %d TXQ=NULL\n", qid); break;
}
txq = container_of(tq, struct sge_eth_txq, q); if (unlikely(tq->abs_id != qid)) {
dev_err(adapter->pdev_dev, "Egress Update QID %d refers to TXQ %d\n",
qid, tq->abs_id); break;
}
/* * Restart a stopped TX Queue which has less than half of its * TX ring in use ...
*/
txq->q.restarts++;
netif_tx_wake_queue(txq->txq); break;
}
/* * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues * to use and initializes them. We support multiple "Queue Sets" per port if * we have MSI-X, otherwise just one queue set per port.
*/ staticint setup_sge_queues(struct adapter *adapter)
{ struct sge *s = &adapter->sge; int err, pidx, msix;
/* * Clear "Queue Set" Free List Starving and TX Queue Mapping Error * state.
*/
bitmap_zero(s->starving_fl, MAX_EGRQ);
/* * If we're using MSI interrupt mode we need to set up a "forwarded * interrupt" queue which we'll set up with our MSI vector. The rest * of the ingress queues will be set up to forward their interrupts to * this queue ... This must be first since t4vf_sge_alloc_rxq() uses * the intrq's queue ID as the interrupt forwarding queue for the * subsequent calls ...
*/ if (adapter->flags & CXGB4VF_USING_MSI) {
err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
adapter->port[0], 0, NULL, NULL); if (err) goto err_free_queues;
}
/* * Allocate each "port"'s initial Queue Sets. These can be changed * later on ... up to the point where any interface on the adapter is * brought up at which point lots of things get nailed down * permanently ...
*/
msix = MSIX_IQFLINT;
for_each_port(adapter, pidx) { struct net_device *dev = adapter->port[pidx]; struct port_info *pi = netdev_priv(dev); struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; int qs;
/* * The FW_IQ_CMD doesn't return the Absolute Queue IDs * for Free Lists but since all of the Egress Queues * (including Free Lists) have Relative Queue IDs * which are computed as Absolute - Base Queue ID, we * can synthesize the Absolute Queue IDs for the Free * Lists. This is useful for debugging purposes when * we want to dump Queue Contexts via the PF Driver.
*/
rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
}
} return 0;
/* * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive * queues. We configure the RSS CPU lookup table to distribute to the number * of HW receive queues, and the response queue lookup table to narrow that * down to the response queues actually configured for each "port" (Virtual * Interface). We always configure the RSS mapping for all ports since the * mapping table has plenty of entries.
*/ staticint setup_rss(struct adapter *adapter)
{ int pidx;
/* * Perform Global RSS Mode-specific initialization.
*/ switch (adapter->params.rss.mode) { case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: /* * If Tunnel All Lookup isn't specified in the global * RSS Configuration, then we need to specify a * default Ingress Queue for any ingress packets which * aren't hashed. We'll use our first ingress queue * ...
*/ if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { union rss_vi_config config;
err = t4vf_read_rss_vi_config(adapter,
pi->viid,
&config); if (err) return err;
config.basicvirtual.defaultq =
rxq[0].rspq.abs_id;
err = t4vf_write_rss_vi_config(adapter,
pi->viid,
&config); if (err) return err;
} break;
}
}
return 0;
}
/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			/* Unwind the SGE queues on RSS setup failure. */
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & CXGB4VF_USING_MSIX)
			name_msix_vecs(adapter);

		adapter->flags |= CXGB4VF_FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags &
		(CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
	if (adapter->flags & CXGB4VF_USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);
	return 0;
}
/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) closed.  (Note that this routine is called "cxgb_down" in the
 * PF Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & CXGB4VF_USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}
/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & CXGB4VF_FW_OK))
		return -ENXIO;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4vf_update_port_info(pi);
	if (err < 0)
		goto err_unwind;

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	/*
	 * Success path: the original text fell straight through into
	 * err_unwind here (tearing the adapter back down on a successful
	 * open).  Start the TX queues, record this port as open and return.
	 */
	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}
/* * Shut down a net device. This routine is called "cxgb_close" in the PF * Driver ...
*/ staticint cxgb4vf_stop(struct net_device *dev)
{ struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter;
ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
NULL, ucast ? &uhash : &mhash, false); if (ret < 0) goto out; /* if hash != 0, then add the addr to hash addr list * so on the end we will calculate the hash for the * list and program it
*/ if (uhash || mhash) {
new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); if (!new_entry) return -ENOMEM;
ether_addr_copy(new_entry->addr, mac_addr);
list_add_tail(&new_entry->list, &adapter->mac_hlist);
ret = cxgb4vf_set_addr_hash(pi);
}
out: return ret < 0 ? ret : 0;
}
/* If the MAC address to be removed is in the hash addr * list, delete it from the list and update hash vector
*/
list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) { if (ether_addr_equal(entry->addr, mac_addr)) {
list_del(&entry->list);
kfree(entry); return cxgb4vf_set_addr_hash(pi);
}
}
ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false); return ret < 0 ? -EINVAL : 0;
}
/* * Set RX properties of a port, such as promiscruity, address filters, and MTU. * If @mtu is -1 it is left unchanged.
*/ staticint set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{ struct port_info *pi = netdev_priv(dev);
/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
/* * Find the entry in the interrupt holdoff timer value array which comes * closest to the specified interrupt holdoff value.
*/ staticint closest_timer(conststruct sge *s, int us)
{ int i, timer_idx = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { int delta = us - s->timer_val[i]; if (delta < 0)
delta = -delta; if (delta < min_delta) {
min_delta = delta;
timer_idx = i;
}
} return timer_idx;
}
staticint closest_thres(conststruct sge *s, int thres)
{ int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
delta = thres - s->counter_val[i]; if (delta < 0)
delta = -delta; if (delta < min_delta) {
min_delta = delta;
pktcnt_idx = i;
}
} return pktcnt_idx;
}
/* * Return a queue's interrupt hold-off time in us. 0 means no timer.
*/ staticunsignedint qtimer_val(conststruct adapter *adapter, conststruct sge_rspq *rspq)
{ unsignedint timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
/** * set_rxq_intr_params - set a queue's interrupt holdoff parameters * @adapter: the adapter * @rspq: the RX response queue * @us: the hold-off time in us, or 0 to disable timer * @cnt: the hold-off packet count, or 0 to disable counter * * Sets an RX response queue's interrupt hold-off time and packet count. * At least one of the two needs to be enabled for the queue to generate * interrupts.
*/ staticint set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, unsignedint us, unsignedint cnt)
{ unsignedint timer_idx;
/* * If both the interrupt holdoff timer and count are specified as * zero, default to a holdoff count of 1 ...
*/ if ((us | cnt) == 0)
cnt = 1;
/* * If an interrupt holdoff count has been specified, then find the * closest configured holdoff count and use that. If the response * queue has already been created, then update its queue context * parameters ...
*/ if (cnt) { int err;
u32 v, pktcnt_idx;
/* * Return a version number to identify the type of adapter. The scheme is: * - bits 0..9: chip version * - bits 10..15: chip revision
*/ staticinlineunsignedint mk_adap_vers(conststruct adapter *adapter)
{ /* * Chip version 4, revision 0x3f (cxgb4vf).
*/ return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	/*
	 * The VF Driver doesn't have access to any of the other
	 * common Ethernet device ioctl()'s (like reading/writing
	 * PHY registers, etc.
	 */
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}
/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/* Push the new MTU to the firmware; leave all other RX mode
	 * parameters (-1) unchanged.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		WRITE_ONCE(dev->mtu, new_mtu);
	return ret;
}
/*
 * Constrain a requested feature set to a supportable combination.  Must
 * return the fixed-up feature mask -- the original text returned 0 (which
 * would clear every feature) and referenced identifiers (changed, pi) that
 * belong to a set_features implementation, not here.
 */
static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Program the new address into the MPS filter before committing it
	 * to the net_device so a failure leaves the old address intact.
	 */
	ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
				 addr->sa_data, true);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER /* * Poll all of our receive queues. This is called outside of normal interrupt * context.
*/ staticvoid cxgb4vf_poll_controller(struct net_device *dev)
{ struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter;
if (adapter->flags & CXGB4VF_USING_MSIX) { struct sge_eth_rxq *rxq; int nqsets;
/* * Ethtool operations. * =================== * * Note that we don't support any ethtool operations which change the physical * state of the port to which we're linked.
*/
/** * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask * @port_type: Firmware Port Type * @fw_caps: Firmware Port Capabilities * @link_mode_mask: ethtool Link Mode Mask * * Translate a Firmware Port Capabilities specification to an ethtool * Link Mode Mask.
*/ staticvoid fw_caps_to_lmm(enum fw_port_type port_type, unsignedint fw_caps, unsignedlong *link_mode_mask)
{ #define SET_LMM(__lmm_name) \
__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
link_mode_mask)
#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
SET_LMM(__lmm_name); \
} while (0)
switch (port_type) { case FW_PORT_TYPE_BT_SGMII: case FW_PORT_TYPE_BT_XFI: case FW_PORT_TYPE_BT_XAUI:
SET_LMM(TP);
FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); break;
case FW_PORT_TYPE_KX4: case FW_PORT_TYPE_KX:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); break;
case FW_PORT_TYPE_KR:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); break;
case FW_PORT_TYPE_BP_AP:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); break;
case FW_PORT_TYPE_FIBER_XFI: case FW_PORT_TYPE_FIBER_XAUI: case FW_PORT_TYPE_SFP: case FW_PORT_TYPE_QSFP_10G: case FW_PORT_TYPE_QSA:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); break;
case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); break;
case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_SFP28:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); break;
case FW_PORT_TYPE_KR_SFP28:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full); break;
case FW_PORT_TYPE_KR_XLAUI:
SET_LMM(Backplane);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full); break;
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full); break;
case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full); break;
/* For the nonce, the Firmware doesn't send up Port State changes * when the Virtual Interface attached to the Port is down. So * if it's down, let's grab any changes.
*/ if (!netif_running(dev))
(void)t4vf_update_port_info(pi);
/* Translate the Firmware FEC Support into the ethtool value. We * always support IEEE 802.3 "automatic" selection of Link FEC type if * any FEC is supported.
*/
fec->fec = fwcap_to_eth_fec(lc->pcaps); if (fec->fec != ETHTOOL_FEC_OFF)
fec->fec |= ETHTOOL_FEC_AUTO;
/* Translate the current internal FEC parameters into the * ethtool values.
*/
fec->active_fec = cc_to_eth_fec(lc->fec); return 0;
}
/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}
/* * Return the device's current Queue Set ring size parameters along with the * allowed maximum values. Since ethtool doesn't understand the concept of * multi-queue devices, we just return the current values associated with the * first Queue Set.
*/ staticvoid cxgb4vf_get_ringparam(struct net_device *dev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, struct netlink_ext_ack *extack)
{ conststruct port_info *pi = netdev_priv(dev); conststruct sge *s = &pi->adapter->sge;
/* * Set the Queue Set ring size parameters for the device. Again, since * ethtool doesn't allow for the concept of multiple queues per device, we'll * apply these new values across all of the Queue Sets associated with the * device -- after vetting them of course!
*/ staticint cxgb4vf_set_ringparam(struct net_device *dev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, struct netlink_ext_ack *extack)
{ conststruct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; int qs;
/* * Return the interrupt holdoff timer and count for the first Queue Set on the * device. Our extension ioctl() (the cxgbtool interface) allows the * interrupt holdoff timer to be read on all of the device's Queue Sets.
*/ staticint cxgb4vf_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack)
{ conststruct port_info *pi = netdev_priv(dev); conststruct adapter *adapter = pi->adapter; conststruct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
/* * Set the RX interrupt holdoff timer and count for the first Queue Set on the * interface. Our extension ioctl() (the cxgbtool interface) allows us to set * the interrupt holdoff timer on any of the device's Queue Sets.
*/ staticint cxgb4vf_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack)
{ conststruct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter;
/* * Port stats maintained per queue of the port.  NOTE: the field order here
 * is load-bearing -- the per-queue entries of stats_strings[] below must
 * stay in the same order as these fields.
 */ struct queue_port_stats {
u64 tso;	/* "TSO": TCP Segmentation Offload requests */
u64 tx_csum;	/* "TxCsumOffload": TX checksum offloads */
u64 rx_csum;	/* "RxCsumGood": RX packets with good checksums */
u64 vlan_ex;	/* "VLANextractions": VLAN tags extracted on RX */
u64 vlan_ins;	/* "VLANinsertions": VLAN tags inserted on TX */
u64 lro_pkts;	/* "GROPackets": GRO/LRO packets */
u64 lro_merged;	/* "GROMerged": GRO/LRO merged packets */
};
/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes ",
	"TxBroadcastFrames ",
	"TxMulticastBytes ",
	"TxMulticastFrames ",
	"TxUnicastBytes ",
	"TxUnicastFrames ",
	"TxDroppedFrames ",
	"TxOffloadBytes ",
	"TxOffloadFrames ",
	"RxBroadcastBytes ",
	"RxBroadcastFrames ",
	"RxMulticastBytes ",
	"RxMulticastFrames ",
	"RxUnicastBytes ",
	"RxUnicastFrames ",
	"RxErrorFrames ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROPackets ",
	"GROMerged ",
};
/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}
/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}
/* * Small utility routine to accumulate queue statistics across the queues of * a "port".
*/ staticvoid collect_sge_port_stats(conststruct adapter *adapter, conststruct port_info *pi, struct queue_port_stats *stats)
{ conststruct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; conststruct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; int qs;
/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}
/* * Dump a block of registers, start to end inclusive, into a buffer.
*/ staticvoid reg_block_dump(struct adapter *adapter, void *regbuf, unsignedint start, unsignedint end)
{
u32 *bp = regbuf + start - T4VF_REGMAP_START;
for ( ; start <= end; start += sizeof(u32)) { /* * Avoid reading the Mailbox Control register since that * can trigger a Mailbox Ownership Arbitration cycle and * interfere with communication with the firmware.
*/ if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
*bp++ = 0xffff; else
*bp++ = t4_read_reg(adapter, start);
}
}
/* * /sys/kernel/debug/cxgb4vf support code and data. * ================================================
*/
/* * Show Firmware Mailbox Command/Reply Log * * Note that we don't do any locking when dumping the Firmware Mailbox Log so * it's possible that we can catch things during a log update and therefore * see partially corrupted log entries. But i9t's probably Good Enough(tm). * If we ever decide that we want to make sure that we're dumping a coherent * log, we'd need to perform locking in the mailbox logging and in * mboxlog_open() where we'd need to grab the entire mailbox log in one go * like we do for the Firmware Device Log. But as stated above, meh ...
*/ staticint mboxlog_show(struct seq_file *seq, void *v)
{ struct adapter *adapter = seq->private; struct mbox_cmd_log *log = adapter->mbox_log; struct mbox_cmd *entry; int entry_idx, i;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%10s %15s %5s %5s %s\n", "Seq#", "Tstamp", "Atime", "Etime", "Command/Reply"); return 0;
}
/* * Return the number of "entries" in our "file". We group the multi-Queue * sections with QPL Queue Sets per "entry". The sections of the output are: * * Ethernet RX/TX Queue Sets * Firmware Event Queue * Forwarded Interrupt Queue (if in MSI mode)
*/ staticint sge_queue_entries(conststruct adapter *adapter)
{ return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
#undef R #undef T #undef S #undef R3 #undef T3 #undef S3
return 0;
}
/* * Return the number of "entries" in our "file". We group the multi-Queue * sections with QPL Queue Sets per "entry". The sections of the output are: * * Ethernet RX/TX Queue Sets * Firmware Event Queue * Forwarded Interrupt Queue (if in MSI mode)
*/ staticint sge_qstats_entries(conststruct adapter *adapter)
{ return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
/* * Module and device initialization and cleanup code. * ==================================================
*/
/* * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the * directory (debugfs_root) has already been set up.
*/ staticint setup_debugfs(struct adapter *adapter)
{ int i;
BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/* * Debugfs support is best effort.
*/ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.42 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.