/* Fetch one sw_data word from a descriptor.
 * No endian conversion: the hardware never touches the sw_data area.
 */
static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	return desc->sw_data[index];
}
/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)
/* Store one sw_data word in a descriptor.
 *
 * @index: which sw_data slot to write
 * @data:  value to store
 * @desc:  descriptor to update
 *
 * No Endian conversion needed as this data is untouched by hw.
 */
static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	desc->sw_data[index] = data;
}
/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
/* Copy @num_words CPU-order 32-bit words into descriptor fields,
 * converting each to the little-endian layout the hardware expects.
 *
 * @words:     source words in CPU byte order
 * @num_words: number of 32-bit words to copy
 * @desc:      destination device-endian words
 */
static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}
/* Read the e-fuse value as 32 bit values to be endian independent */ staticint emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{ unsignedint addr0, addr1;
/* Find this module in the sub-tree for this device */
devices = of_get_child_by_name(node, "netcp-devices"); if (!devices) {
dev_err(dev, "could not find netcp-devices node\n"); return NETCP_MOD_PROBE_SKIPPED;
}
if (of_property_read_string(child, "label", &name) < 0) {
snprintf(node_name, sizeof(node_name), "%pOFn", child);
name = node_name;
} if (!strcasecmp(module->name, name)) break;
}
of_node_put(devices); /* If module not used for this device, skip it */ if (!child) {
dev_warn(dev, "module(%s) not used for device\n", module->name); return NETCP_MOD_PROBE_SKIPPED;
}
ret = module->attach(inst_modpriv->module_priv,
netcp_intf->ndev, interface,
&intf_modpriv->module_priv);
of_node_put(interface); if (ret) {
dev_dbg(dev, "Attach of module %s declined with %d\n",
module->name, ret);
list_del(&intf_modpriv->intf_list);
devm_kfree(dev, intf_modpriv); continue;
}
}
/* Now register the interface with netdev */
list_for_each_entry(netcp_intf,
&netcp_device->interface_head,
interface_list) { /* If interface not registered then register now */ if (!netcp_intf->netdev_registered) {
ret = netcp_register_interface(netcp_intf); if (ret) return -ENODEV;
}
} return 0;
}
int netcp_register_module(struct netcp_module *module)
{ struct netcp_device *netcp_device; struct netcp_module *tmp; int ret;
if (!module->name) {
WARN(1, "error registering netcp module: no name\n"); return -EINVAL;
}
if (!module->probe) {
WARN(1, "error registering netcp module: no probe\n"); return -EINVAL;
}
/* Remove the module from the module list */
for_each_netcp_module(module_tmp) { if (module == module_tmp) {
list_del(&module->module_list); break;
}
}
while (dma_desc) {
ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!ndesc)) {
dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); break;
}
get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); /* warning!!!! We are retrieving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
buf_ptr = (void *)GET_SW_DATA0(ndesc);
buf_len = (int)GET_SW_DATA1(desc);
dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(buf_ptr);
knav_pool_desc_put(netcp->rx_pool, desc);
} /* warning!!!! We are retrieving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
buf_ptr = (void *)GET_SW_DATA0(desc);
buf_len = (int)GET_SW_DATA1(desc);
if (buf_ptr)
netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
knav_pool_desc_put(netcp->rx_pool, desc);
}
dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); if (!dma_desc) return -1;
desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); return 0;
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); /* warning!!!! We are retrieving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
org_buf_ptr = (void *)GET_SW_DATA0(desc);
org_buf_len = (int)GET_SW_DATA1(desc);
if (unlikely(!org_buf_ptr)) {
dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); goto free_desc;
}
/* Build a new sk_buff for the primary buffer */
skb = build_skb(org_buf_ptr, org_buf_len); if (unlikely(!skb)) {
dev_err(netcp->ndev_dev, "build_skb() failed\n"); goto free_desc;
}
/* update data, tail and len */
skb_reserve(skb, NETCP_SOP_OFFSET);
__skb_put(skb, buf_len);
/* Fill in the page fragment list */ while (dma_desc) { struct page *page;
ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); if (unlikely(!ndesc)) {
dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); goto free_desc;
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); /* warning!!!! We are retrieving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
page = (struct page *)GET_SW_DATA0(ndesc);
/* Free the descriptor */
knav_pool_desc_put(netcp->rx_pool, ndesc);
}
/* check for packet len and warn */ if (unlikely(pkt_sz != accum_sz))
dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
pkt_sz, accum_sz);
/* Newer version of the Ethernet switch can trim the Ethernet FCS * from the packet and is indicated in hw_cap. So trim it only for * older h/w
*/ if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
__pskb_trim(skb, skb->len - ETH_FCS_LEN);
/* Process up to @budget received packets.
 *
 * @netcp:  interface private state
 * @budget: NAPI budget — maximum number of packets to process
 *
 * Returns the number of packets actually processed.  Stops early when
 * netcp_process_one_rx_packet() returns non-zero (e.g. the Rx queue
 * pop yields no descriptor).
 */
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}
/* Release descriptors and attached buffers from Rx FDQ */ staticvoid netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{ struct knav_dma_desc *desc; unsignedint buf_len, dma_sz;
dma_addr_t dma; void *buf_ptr;
/* Allocate descriptor */ while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz); if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); continue;
}
get_org_pkt_info(&dma, &buf_len, desc); /* warning!!!! We are retrieving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
buf_ptr = (void *)GET_SW_DATA0(desc);
if (unlikely(!dma)) {
dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
knav_pool_desc_put(netcp->rx_pool, desc); continue;
}
if (unlikely(!buf_ptr)) {
dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
knav_pool_desc_put(netcp->rx_pool, desc); continue;
}
/* warning!!!! We are saving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
sw_data[0] = (u32)bufptr;
} else { /* Allocate a secondary receive queue entry */
page = alloc_page(GFP_ATOMIC | GFP_DMA); if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); goto fail;
}
buf_len = PAGE_SIZE;
dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); /* warning!!!! We are saving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
sw_data[0] = (u32)page;
sw_data[1] = 0;
}
/* Refill Rx FDQ with descriptors & attached buffers.
 *
 * @netcp: interface private state
 *
 * For each populated free-descriptor queue, tops the queue back up to
 * its configured depth by allocating fresh Rx buffers.
 */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		/* Stop refilling this FDQ as soon as an allocation fails */
		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	} /* end for fdqs */
}
while (budget--) {
dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz); if (!dma) break;
desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz); if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
tx_stats->tx_errors++; continue;
}
/* warning!!!! We are retrieving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
skb = (struct sk_buff *)GET_SW_DATA0(desc);
netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) {
dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
tx_stats->tx_errors++; continue;
}
tx_cb = (struct netcp_tx_cb *)skb->cb; if (tx_cb->txtstamp)
tx_cb->txtstamp(tx_cb->ts_context, skb);
/* frag list based linkage is not supported for now. */ if (skb_shinfo(skb)->frag_list) {
dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n"); goto free_descs;
}
/* Find out where to inject the packet for transmission */
list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
&p_info); if (unlikely(ret != 0)) {
dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
tx_hook->order, ret);
ret = (ret < 0) ? ret : NETDEV_TX_OK; goto out;
}
}
/* Make sure some TX hook claimed the packet */
tx_pipe = p_info.tx_pipe; if (!tx_pipe) {
dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
ret = -ENXIO; goto out;
}
/* update descriptor */ if (p_info.psdata_len) { /* psdata points to both native-endian and device-endian data */
__le32 *psdata = (void __force *)p_info.psdata;
set_words(&tmp, 1, &desc->packet_info); /* warning!!!! We are saving the virtual ptr in the sw_data * field as a 32bit value. Will not work on 64bit machines
*/
SET_SW_DATA0((u32)skb, desc);
/* submit packet descriptor */
ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
&dma_sz); if (unlikely(ret)) {
dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
ret = -ENOMEM; goto out;
}
skb_tx_timestamp(skb);
knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
out: return ret;
}
/* Submit the packet */ static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{ struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_stats *tx_stats = &netcp->stats; int subqueue = skb_get_queue_mapping(skb); struct knav_dma_desc *desc; int desc_count, ret = 0;
if (unlikely(skb->len <= 0)) {
dev_kfree_skb(skb); return NETDEV_TX_OK;
}
if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE); if (ret < 0) { /* If we get here, the skb has already been dropped */
dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
ret);
tx_stats->tx_dropped++; return ret;
}
skb->len = NETCP_MIN_PACKET_SIZE;
}
desc = netcp_tx_map_skb(skb, netcp); if (unlikely(!desc)) {
netif_stop_subqueue(ndev, subqueue);
ret = -ENOBUFS; goto drop;
}
ret = netcp_tx_submit_skb(netcp, skb, desc); if (ret) goto drop;
/* Check Tx pool count & stop subqueue if needed */
desc_count = knav_pool_count(netcp->tx_pool); if (desc_count < netcp->tx_pause_threshold) {
dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
netif_stop_subqueue(ndev, subqueue);
} return NETDEV_TX_OK;
/* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) ||
(dev->real_num_tx_queues < num_tc)) return -EINVAL;
/* Configure traffic class to queue mappings */ if (num_tc) {
netdev_set_num_tc(dev, num_tc); for (i = 0; i < num_tc; i++)
netdev_set_tc_queue(dev, i, 1, i);
} else {
netdev_reset_tc(dev);
}
ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac); if (efuse_mac) { if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
dev_err(dev, "could not find efuse-mac reg resource\n");
ret = -ENODEV; goto quit;
}
size = resource_size(&res);
if (!devm_request_mem_region(dev, res.start, size,
dev_name(dev))) {
dev_err(dev, "could not reserve resource\n");
ret = -ENOMEM; goto quit;
}
efuse = devm_ioremap(dev, res.start, size); if (!efuse) {
dev_err(dev, "could not map resource\n");
devm_release_mem_region(dev, res.start, size);
ret = -ENOMEM; goto quit;
}
emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); if (is_valid_ether_addr(efuse_mac_addr))
eth_hw_addr_set(ndev, efuse_mac_addr); else
eth_hw_addr_random(ndev);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
} else {
ret = of_get_ethdev_address(node_interface, ndev); if (ret)
eth_hw_addr_random(ndev);
}
ret = of_property_read_string(node_interface, "rx-channel",
&netcp->dma_chan_name); if (ret < 0) {
dev_err(dev, "missing \"rx-channel\" parameter\n");
ret = -ENODEV; goto quit;
}
ret = of_property_read_u32(node_interface, "rx-queue",
&netcp->rx_queue_id); if (ret < 0) {
dev_warn(dev, "missing \"rx-queue\" parameter\n");
netcp->rx_queue_id = KNAV_QUEUE_QPEND;
}
ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
netcp->rx_queue_depths,
KNAV_DMA_FDQ_PER_CHAN); if (ret < 0) {
dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
netcp->rx_queue_depths[0] = 128;
}
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
ret = -ENODEV; goto quit;
}
netcp->rx_pool_size = temp[0];
netcp->rx_pool_region_id = temp[1];
ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2); if (ret < 0) {
dev_err(dev, "missing \"tx-pool\" parameter\n");
ret = -ENODEV; goto quit;
}
netcp->tx_pool_size = temp[0];
netcp->tx_pool_region_id = temp[1];
if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
dev_err(dev, "tx-pool size too small, must be at least %u\n",
(unsignedint)MAX_SKB_FRAGS);
ret = -ENODEV; goto quit;
}
ret = of_property_read_u32(node_interface, "tx-completion-queue",
&netcp->tx_compl_qid); if (ret < 0) {
dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
}
/* NAPI register */
netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);
/* Notify each of the modules that the interface is going away */
list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
intf_list) {
module = intf_modpriv->netcp_module;
dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
module->name); if (module->release)
module->release(intf_modpriv->module_priv);
list_del(&intf_modpriv->intf_list);
}
WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
ndev->name);
/* now that all modules are removed, clean up the interfaces */
list_for_each_entry_safe(netcp_intf, netcp_tmp,
&netcp_device->interface_head,
interface_list) {
netcp_delete_interface(netcp_device, netcp_intf->ndev);
}
WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n", pdev->name);
/*
 * NOTE(review): the following German text is web-page footer residue from
 * the tool that extracted this source; it is not part of the driver and
 * should be removed.  Preserved verbatim for reference:
 *
 *   Die Informationen auf dieser Webseite wurden
 *   nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 *   Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 *   Informationen zugesichert.
 *   Bemerkung:
 *   Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */