/* NOTE(review): fragment of an ethtool set_ringparam handler — the function
 * signature, local declarations (err, i, temp_ring, new_*_count) and the
 * clear_reset label live outside this view.
 */
/* Nothing to do if the requested ring sizes already match the current ones. */
if (new_tx_count == wx->tx_ring_count &&
new_rx_count == wx->rx_ring_count) return 0;
/* Serialize against concurrent resets; propagate failure to the caller. */
err = wx_set_state_reset(wx); if (err) return err;
/* Interface is down: no ring memory is allocated yet, so just record the
 * new descriptor counts on every ring and in the adapter state, then jump
 * to the common exit that clears the reset state.
 */
if (!netif_running(wx->netdev)) { for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count; for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->count = new_rx_count;
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
goto clear_reset;
}
/* allocate temporary buffer to store rings in */
/* One scratch wx_ring per queue, sized for whichever direction has more. */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); if (!temp_ring) {
err = -ENOMEM; goto clear_reset;
}
/* NOTE(review): tail fragment of an ethtool get_rxnfc handler — the
 * enclosing function's signature and the declarations of ret, txgbe and
 * rule_locs are outside this view.
 */
switch (cmd->cmd) { case ETHTOOL_GRXRINGS:
/* Report how many RX queues a flow rule may target. */
cmd->data = wx->num_rx_queues;
ret = 0; break; case ETHTOOL_GRXCLSRLCNT:
/* Number of Flow Director filters currently installed. */
cmd->rule_cnt = txgbe->fdir_filter_count;
ret = 0; break; case ETHTOOL_GRXCLSRULE:
/* Fetch one filter identified by fs.location. */
ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd); break; case ETHTOOL_GRXCLSRLALL:
/* Dump the locations of all installed filters into the user array. */
ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs); break; default: break;
}
return ret;
}
/* txgbe_flowspec_to_flow_type - translate an ethtool RX flow spec into the
 * driver's TXGBE_ATR_FLOW_TYPE_* encoding.
 * @fsp: ethtool RX flow specification supplied by user space
 * @flow_type: output; receives the ATR flow type on success
 *
 * IP_USER_FLOW is accepted either with an explicit TCP/UDP/SCTP protocol,
 * or as a raw IPv4 match when the protocol field is left unmasked.
 *
 * Returns 0 on success, -EINVAL for flow types the Flow Director hardware
 * cannot match on.
 *
 * Fixes vs. previous revision: "staticint" was not valid C (missing space),
 * and the function fell off the end without returning a value (undefined
 * behavior) — the terminating "return 0;" is restored.
 */
static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			/* Raw IPv4 match only when proto is unmasked. */
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			fallthrough;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* NOTE(review): fragment of the Flow Director software-table update helper
 * (insert/replace/delete a filter keyed by sw_idx).  The function head,
 * local declarations and the routine's tail are outside this view, and the
 * `if (rule && rule->sw_idx == sw_idx)` compound below is never visibly
 * closed — upstream removes the displaced rule (hlist_del/kfree, setting
 * `deleted`) inside it; confirm that code was not lost in this copy.
 */
/* Walk the index-sorted filter list: stop at the first entry whose sw_idx
 * is >= the requested slot, remembering the predecessor for insertion.
 */
hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
fdir_node) { /* hash found, or no matching entry */ if (rule->sw_idx >= sw_idx) break;
parent = node;
}
/* if there is an old rule occupying our place remove it */ if (rule && rule->sw_idx == sw_idx) { /* hardware filters are only configured when interface is up, * and we should not issue filter commands while the interface * is down
*/ if (netif_running(wx->netdev) &&
(!input || rule->filter.formatted.bkt_hash !=
input->filter.formatted.bkt_hash)) {
/* Old rule differs from the new one (or this is a delete):
 * drop it from the hardware before touching the software list.
 */
err = txgbe_fdir_erase_perfect_filter(wx,
&rule->filter,
sw_idx); if (err) return -EINVAL;
}
/* If we weren't given an input, then this was a request to delete a * filter. We should return -EINVAL if the filter wasn't found, but * return 0 if the rule was successfully deleted.
*/ if (!input) return deleted ? 0 : -EINVAL;
/* initialize node and set software index */
INIT_HLIST_NODE(&input->fdir_node);
/* add filter to the list */ if (parent)
hlist_add_behind(&input->fdir_node, parent); else
hlist_add_head(&input->fdir_node,
&txgbe->fdir_filter_list);
/* Fragment of the Flow Director "add filter" path (the enclosing function's
 * head, locals, and the err_out/err_unlock unwind labels sit outside this
 * view).  Fixes vs. previous revision: "elseif" is not a C keyword — both
 * occurrences are now "else if"; mangled line-packing reformatted.
 */
	/* Perfect (exact-match) filtering must be enabled to insert rules. */
	if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
		return -EOPNOTSUPP;

	/* ring_cookie is a masked into a set of queues and txgbe pools or
	 * we use drop index
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = TXGBE_RDB_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		/* Validate the target against PF queues or the VF's pool. */
		if (!vf && ring >= wx->num_rx_queues)
			return -EINVAL;
		else if (vf && (vf > wx->num_vfs ||
				ring >= wx->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = wx->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
		wx_err(wx, "Location out of range\n");
		return -EINVAL;
	}

	/* GFP_ATOMIC: allocated ahead of taking fdir_perfect_lock below. */
	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union txgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (txgbe_flowspec_to_flow_type(fsp,
					&input->filter.formatted.flow_type)) {
		wx_err(wx, "Unrecognized flow type\n");
		goto err_out;
	}

	/* NOTE(review): upstream populates the per-field match mask (VLAN,
	 * src/dst IPs, L4 ports) between the flow-type lookup and the action
	 * selection; that code is not visible in this fragment — confirm it
	 * was not lost.
	 */

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&txgbe->fdir_perfect_lock);

	if (hlist_empty(&txgbe->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
		err = txgbe_fdir_set_input_mask(wx, &mask);
		if (err)
			goto err_unlock;
	} else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
		wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
		goto err_unlock;
	}

	/* apply mask and compute/store hash */
	txgbe_atr_compute_perfect_hash(&input->filter, &mask);

	/* check if new entry does not exist on filter list */
	if (txgbe_match_ethtool_fdir_entry(txgbe, input))
		goto err_unlock;

	/* only program filters to hardware if the net device is running, as
	 * we store the filters in the Rx buffer which is not allocated when
	 * the device is down
	 */
	if (netif_running(wx->netdev)) {
		err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
						      input->sw_idx, queue);
		if (err)
			goto err_unlock;
	}
/* NOTE(review): dispatch fragment of an ethtool set_rxnfc handler —
 * insert or delete a Flow Director rule; the enclosing function and the
 * declarations of ret/txgbe are outside this view.
 */
switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS:
ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd); break; case ETHTOOL_SRXCLSRLDEL:
ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd); break; default: break;
}
/*
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
 * Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */