// SPDX-License-Identifier: GPL-2.0-or-later /* * IBM Power Virtual Ethernet Device Driver * * Copyright (C) IBM Corporation, 2003, 2010 * * Authors: Dave Larson <larson1@us.ibm.com> * Santiago Leon <santil@linux.vnet.ibm.com> * Brian King <brking@linux.vnet.ibm.com> * Robert Jennings <rcj@linux.vnet.ibm.com> * Anton Blanchard <anton@au.ibm.com>
*/
MODULE_AUTHOR("Santiago Leon ");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
staticunsignedint tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak, "Maximum size of packet that is copied to a new buffer on transmit");
staticunsignedint rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak, "Maximum size of packet that is copied to a new buffer on receive");
staticbool old_large_send __read_mostly;
module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send, "Use old large send method on firmware that supports the new method");
/* Pairs an ethtool statistic name with the byte offset of the backing
 * counter (presumably within the adapter structure - confirm against the
 * statistics table that uses it).
 */
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};
/* simple methods of getting data from the current rxq entry */ staticinline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}
/*
 * NOTE(review): the two functions below are garbled by extraction - multiple
 * statements are fused onto single lines and large parts of the bodies are
 * missing from this view (notably the h_add_logical_lan buffer hcall that
 * would set lpar_rc, and the skb allocation path). In the second fragment
 * the loop variable 'i' has no visible declaration, and a loop belonging to
 * a different caller appears spliced into ibmveth_update_rx_no_buffer.
 * Do not treat this region as compilable; restore from the upstream driver.
 */
/* replenish the buffers for a pool. note that we don't need to * skb_reserve these since they are used for incoming...
 */ staticvoid ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{ union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
u32 remaining = pool->size - atomic_read(&pool->available);
u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0}; unsignedlong lpar_rc;
u32 buffers_added = 0;
u32 i, filled, batch; struct vio_dev *vdev;
dma_addr_t dma_addr; struct device *dev;
u32 index;
vdev = adapter->vdev;
dev = &vdev->dev;
mb();
batch = adapter->rx_buffers_per_hcall;
while (remaining > 0) { unsignedint free_index = pool->consumer_index;
/* Fill a batch of descriptors */ for (filled = 0; filled < min(remaining, batch); filled++) {
index = pool->free_map[free_index]; if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
adapter->replenish_add_buff_failure++;
netdev_info(adapter->netdev, "Invalid map index %u, reset\n",
index);
schedule_work(&adapter->work); break;
}
if (!pool->skbuff[index]) { struct sk_buff *skb = NULL;
/* Only update pool state after hcall succeeds */ for (i = 0; i < filled; i++) {
free_index = pool->consumer_index;
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->consumer_index++; if (pool->consumer_index >= pool->size)
pool->consumer_index = 0;
}
/* * If multi rx buffers hcall is no longer supported by FW * e.g. in the case of Live Parttion Migration
 */ if (batch > 1 && lpar_rc == H_FUNCTION) { /* * Instead of retry submit single buffer individually * here just set the max rx buffer per hcall to 1 * buffers will be respleshed next time * when ibmveth_replenish_buffer_pool() is called again * with single-buffer case
 */
netdev_info(adapter->netdev, "RX Multi buffers not supported by FW, rc=%lu\n",
lpar_rc);
adapter->rx_buffers_per_hcall = 1;
netdev_info(adapter->netdev, "Next rx replesh will fall back to single-buffer hcall\n");
} break;
}
/* * The final 8 bytes of the buffer list is a counter of frames dropped * because there was not a buffer in the buffer list capable of holding * the frame.
 */ staticvoid ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
__be64 *p = adapter->buffer_list_addr + 4096 - 8;
for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) { struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
if (pool->active &&
(atomic_read(&pool->available) < pool->threshold))
ibmveth_replenish_buffer_pool(adapter, pool);
}
ibmveth_update_rx_no_buffer(adapter);
}
/* empty and free ana buffer pool - also used to do cleanup in error paths */ staticvoid ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{ int i;
kfree(pool->free_map);
pool->free_map = NULL;
if (pool->skbuff && pool->dma_addr) { for (i = 0; i < pool->size; ++i) { struct sk_buff *skb = pool->skbuff[i]; if (skb) {
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[i],
pool->buff_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
pool->skbuff[i] = NULL;
}
}
}
if (pool->dma_addr) {
kfree(pool->dma_addr);
pool->dma_addr = NULL;
}
if (pool->skbuff) {
kfree(pool->skbuff);
pool->skbuff = NULL;
}
}
/*
 * NOTE(review): fragment - the tail of ibmveth_remove_buffer_from_pool
 * (presumably the DMA unmap, free_map update and return path) is missing
 * from this view, and the body of ibmveth_rxq_get_buffer is cut off before
 * its return statement. Not compilable as shown.
 */
/** * ibmveth_remove_buffer_from_pool - remove a buffer from a pool * @adapter: adapter instance * @correlator: identifies pool and index * @reuse: whether to reuse buffer * * Return: * * %0 - success * * %-EINVAL - correlator maps to pool or index out of range * * %-EFAULT - pool and index map to null skb
 */ staticint ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
u64 correlator, bool reuse)
{ unsignedint pool = correlator >> 32; unsignedint index = correlator & 0xffffffffUL; unsignedint free_index; struct sk_buff *skb;
skb = adapter->rx_buff_pool[pool].skbuff[index]; if (WARN_ON(!skb)) {
schedule_work(&adapter->work); return -EFAULT;
}
/* if we are going to reuse the buffer then keep the pointers around * but mark index as available. replenish will see the skb pointer and * assume it is to be recycled.
 */ if (!reuse) { /* remove the skb pointer to mark free. actual freeing is done * by upper level networking after gro_recieve
 */
adapter->rx_buff_pool[pool].skbuff[index] = NULL;
/* get the current buffer on the rx queue */ staticinlinestruct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; unsignedint pool = correlator >> 32; unsignedint index = correlator & 0xffffffffUL;
/**
 * ibmveth_allocate_tx_ltb - allocate and DMA-map one tx long term buffer
 * @adapter: adapter instance
 * @idx: index of the tx queue the buffer belongs to
 *
 * Return: 0 on success, -ENOMEM if the allocation or DMA mapping fails.
 * On failure no resources are left allocated.
 */
static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
{
	adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size, GFP_KERNEL);
	if (!adapter->tx_ltb_ptr[idx]) {
		netdev_err(adapter->netdev,
			   "unable to allocate tx long term buffer\n");
		return -ENOMEM;
	}
	adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
						  adapter->tx_ltb_ptr[idx],
						  adapter->tx_ltb_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
		netdev_err(adapter->netdev,
			   "unable to DMA map tx long term buffer\n");
		/* roll back the allocation so the slot stays consistent */
		kfree(adapter->tx_ltb_ptr[idx]);
		adapter->tx_ltb_ptr[idx] = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* Register the buffer list, rx queue descriptor and MAC address with the
 * hypervisor.
 *
 * After a kexec the adapter will still be open, so our attempt to open it
 * will fail. So if we get a failure we free the adapter and try again, but
 * only once.
 *
 * Return: H_SUCCESS, or the hcall return code on failure.
 */
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
					union ibmveth_buf_desc rxq_desc,
					u64 mac_address)
{
	int rc, try_again = 1;

retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		/* the free hcall may report "long busy"; spin until it
		 * actually completes before retrying the registration
		 */
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
/*
 * NOTE(review): garbled region - the middle of ibmveth_open is missing and
 * its irq-failure path runs straight into what looks like the tail of
 * ibmveth_close ("close complete" message and return 0), followed by the
 * kernel-doc and first line of ibmveth_reset, whose body is also cut off.
 * Not compilable as shown; restore from the upstream driver.
 */
staticint ibmveth_open(struct net_device *netdev)
{ struct ibmveth_adapter *adapter = netdev_priv(netdev);
u64 mac_address; int rxq_entries = 1; unsignedlong lpar_rc; int rc; union ibmveth_buf_desc rxq_desc; int i; struct device *dev;
netdev_dbg(netdev, "open starting\n");
napi_enable(&adapter->napi);
for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
rc = -ENOMEM;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); if (!adapter->buffer_list_addr) {
netdev_err(netdev, "unable to allocate list pages\n"); goto out;
}
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); if (!adapter->filter_list_addr) {
netdev_err(netdev, "unable to allocate filter pages\n"); goto out_free_buffer_list;
}
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { if (!adapter->rx_buff_pool[i].active) continue; if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
netdev_err(netdev, "unable to alloc pool\n");
adapter->rx_buff_pool[i].active = 0;
rc = -ENOMEM; goto out_free_buffer_pools;
}
}
netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
netdev); if (rc != 0) {
netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
netdev->irq, rc); do {
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) if (adapter->rx_buff_pool[i].active)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
for (i = 0; i < netdev->real_num_tx_queues; i++)
ibmveth_free_tx_ltb(adapter, i);
netdev_dbg(netdev, "close complete\n");
return 0;
}
/** * ibmveth_reset - Handle scheduled reset work * * @w: pointer to work_struct embedded in adapter structure * * Context: This routine acquires rtnl_mutex and disables its NAPI through * ibmveth_close. It can't be called directly in a context that has * already acquired rtnl_mutex or disabled its NAPI, or directly from * a poll routine. * * Return: void
 */ staticvoid ibmveth_reset(struct work_struct *w)
{ struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work); struct net_device *netdev = adapter->netdev;
/*
 * NOTE(review): garbled region fusing fragments of several functions -
 * ibmveth_fix_features (missing its return statement), what appears to be a
 * large-send configuration helper referencing 'adapter' and 'data' with no
 * visible declarations, a set-tx-queues helper referencing 'goal'/'old'/'i',
 * and the hcall-retry tail of a send routine referencing 'retry_count',
 * 'correlator', 'desc' and 'mss'. None of these bodies is complete here.
 */
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
netdev_features_t features)
{ /* * Since the ibmveth firmware interface does not have the * concept of separate tx/rx checksum offload enable, if rx * checksum is disabled we also have to disable tx checksum * offload. Once we disable rx checksum offload, we are no * longer allowed to send tx buffers that are not properly * checksummed.
 */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_CSUM_MASK;
} else {
adapter->fw_large_send_support = data;
adapter->large_send = data;
}
} else { /* Older firmware version of large send offload does not * support tcp6/ipv6
 */ if (data == 1) {
dev->features &= ~NETIF_F_TSO6;
netdev_info(dev, "TSO feature requires all partitions to have updated driver");
}
adapter->large_send = data;
}
/* If ndo_open has not been called yet then don't allocate, just set * desired netdev_queue's and return
 */ if (!(netdev->flags & IFF_UP)) return netif_set_real_num_tx_queues(netdev, goal);
/* We have IBMVETH_MAX_QUEUES netdev_queue's allocated * but we may need to alloc/free the ltb's.
 */
netif_tx_stop_all_queues(netdev);
/* Allocate any queue that we need */ for (i = old; i < goal; i++) { if (adapter->tx_ltb_ptr[i]) continue;
rc = ibmveth_allocate_tx_ltb(adapter, i); if (!rc) continue;
/* if something goes wrong, free everything we just allocated */
netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
old);
goal = old;
old = i; break;
}
rc = netif_set_real_num_tx_queues(netdev, goal); if (rc) {
netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
old);
goal = old;
old = i;
} /* Free any that are no longer needed */ for (i = old; i > goal; i--) { if (adapter->tx_ltb_ptr[i - 1])
ibmveth_free_tx_ltb(adapter, i - 1);
}
/* * The retry count sets a maximum for the number of broadcast and * multicast destinations within the system.
 */
retry_count = 1024;
correlator = 0; do {
ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
correlator, &correlator, mss,
adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
netdev_err(adapter->netdev, "tx: h_send_logical_lan failed " "with rc=%ld\n", ret); return 1;
}
return 0;
}
/* Reject frames the virtual ethernet transport cannot carry.
 *
 * Return: 0 if the packet may be transmitted, or -EOPNOTSUPP for loopback
 * frames whose destination MAC equals our own; such packets are counted
 * in tx_dropped.
 */
static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct ethhdr *ether_header;
	int ret = 0;

	ether_header = eth_hdr(skb);

	if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
		netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
		netdev->stats.tx_dropped++;
		ret = -EOPNOTSUPP;
	}

	return ret;
}
/*
 * NOTE(review): garbled region - the declaration of ibmveth_start_xmit runs
 * into disjoint fragments of what appear to be the rx large-send/checksum
 * helpers (references to 'iph6', 'tcph', 'skb_proto', 'lrg_pkt' with no
 * visible declarations) and the middle of the NAPI poll loop (references to
 * 'frames_processed', 'budget', 'napi'). Large parts of each body are
 * missing from this view; not compilable as shown.
 */
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{ struct ibmveth_adapter *adapter = netdev_priv(netdev); unsignedint desc_flags, total_bytes; union ibmveth_buf_desc desc; int i, queue_num = skb_get_queue_mapping(skb); unsignedlong mss = 0;
/* Need to zero out the checksum */
buf[0] = 0;
buf[1] = 0;
if (skb_is_gso(skb) && adapter->fw_large_send_support)
desc_flags |= IBMVETH_BUF_LRG_SND;
}
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) { if (adapter->fw_large_send_support) {
mss = (unsignedlong)skb_shinfo(skb)->gso_size;
adapter->tx_large_packets++;
} elseif (!skb_is_gso_v6(skb)) { /* Put -1 in the IP checksum to tell phyp it * is a largesend packet. Put the mss in * the TCP checksum.
 */
ip_hdr(skb)->check = 0xffff;
tcp_hdr(skb)->check =
cpu_to_be16(skb_shinfo(skb)->gso_size);
adapter->tx_large_packets++;
}
}
/* Copy header into mapped buffer */ if (unlikely(skb->len > adapter->tx_ltb_size)) {
netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
skb->len, adapter->tx_ltb_size);
netdev->stats.tx_dropped++; goto out;
}
memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
total_bytes = skb_headlen(skb); /* Copy frags into mapped buffers */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (iph6->nexthdr == IPPROTO_TCP) {
offset = sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
} else { return;
}
} else { return;
} /* if mss is not set through Large Packet bit/mss in rx buffer, * expect that the mss will be written to the tcp header checksum.
 */
tcph = (struct tcphdr *)(skb->data + offset); if (lrg_pkt) {
skb_shinfo(skb)->gso_size = mss;
} elseif (offset) {
skb_shinfo(skb)->gso_size = ntohs(tcph->check);
tcph->check = 0;
}
if (skb_proto == ETH_P_IP) {
iph = (struct iphdr *)skb->data;
/* If the IP checksum is not offloaded and if the packet * is large send, the checksum must be rebuilt.
 */ if (iph->check == 0xffff) {
iph->check = 0;
iph->check = ip_fast_csum((unsignedchar *)iph,
iph->ihl);
}
/* When CSO is enabled the TCP checksum may have be set to NULL by * the sender given that we zeroed out TCP checksum field in * transmit path (refer ibmveth_start_xmit routine). In this case set * up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum will * then be recalculated by the destination NIC (CSO must be enabled * on the destination NIC). * * In an OVS environment, when a flow is not cached, specifically for a * new TCP connection, the first packet information is passed up to * the user space for finding a flow. During this process, OVS computes * checksum on the first packet when CHECKSUM_PARTIAL flag is set. * * So, re-compute TCP pseudo header checksum.
 */
restart_poll: while (frames_processed < budget) { if (!ibmveth_rxq_pending_buffer(adapter)) break;
smp_rmb(); if (!ibmveth_rxq_buffer_valid(adapter)) {
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n"); if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) break;
} else { struct sk_buff *skb, *new_skb; int length = ibmveth_rxq_frame_length(adapter); int offset = ibmveth_rxq_frame_offset(adapter); int csum_good = ibmveth_rxq_csum_good(adapter); int lrg_pkt = ibmveth_rxq_large_packet(adapter);
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter); if (unlikely(!skb)) break;
/* if the large packet bit is set in the rx queue * descriptor, the mss will be written by PHYP eight * bytes from the start of the rx buffer, which is * skb->data at this stage
 */ if (lrg_pkt) {
__be64 *rxmss = (__be64 *)(skb->data + 8);
/* PHYP without PLSO support places a -1 in the ip * checksum for large send frames.
 */ if (skb->protocol == cpu_to_be16(ETH_P_IP)) { struct iphdr *iph = (struct iphdr *)skb->data;
if (!napi_complete_done(napi, frames_processed)) goto out;
/* We think we are done - reenable interrupts, * then check once more to make sure we are done.
 */
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); if (WARN_ON(lpar_rc != H_SUCCESS)) {
schedule_work(&adapter->work); goto out;
}
/*
 * NOTE(review): garbled region - ibmveth_change_mtu is missing its final
 * return and closing brace; ibmveth_get_desired_dma runs into what appears
 * to be a fragment of the probe routine (pool_count/pool_size/pool_active
 * tables, kobject registration, tx queue setup) with no visible function
 * boundary between them. Not compilable as shown.
 */
staticint ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{ struct ibmveth_adapter *adapter = netdev_priv(dev); struct vio_dev *viodev = adapter->vdev; int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; int i, rc; int need_restart = 0;
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) break;
if (i == IBMVETH_NUM_BUFF_POOLS) return -EINVAL;
/* Deactivate all the buffer pools so that the next loop can activate
only the buffer pools necessary to hold the new MTU */ if (netif_running(adapter->netdev)) {
need_restart = 1;
ibmveth_close(adapter->netdev);
}
/* Look for an active buffer pool that can hold the new MTU */ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
WRITE_ONCE(dev->mtu, new_mtu);
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev)); if (need_restart) { return ibmveth_open(adapter->netdev);
} return 0;
}
}
if (need_restart && (rc = ibmveth_open(adapter->netdev))) return rc;
/** * ibmveth_get_desired_dma - Calculate IO memory desired by the driver * * @vdev: struct vio_dev for the device whose desired IO mem is to be returned * * Return value: * Number of bytes of IO data the driver will need to perform well.
 */ staticunsignedlong ibmveth_get_desired_dma(struct vio_dev *vdev)
{ struct net_device *netdev = dev_get_drvdata(&vdev->dev); struct ibmveth_adapter *adapter; struct iommu_table *tbl; unsignedlong ret; int i; int rxqentries = 1;
tbl = get_iommu_table_base(&vdev->dev);
/* netdev inits at probe time along with the structures we need below*/ if (netdev == NULL) return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
adapter = netdev_priv(netdev);
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); /* add size of mapped tx buffers */
ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { /* add the size of the active receive buffers */ if (adapter->rx_buff_pool[i].active)
ret +=
adapter->rx_buff_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
buff_size, tbl);
rxqentries += adapter->rx_buff_pool[i].size;
} /* add the size of the receive queue entries */
ret += IOMMU_PAGE_ALIGN(
rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
if (firmware_has_feature(FW_FEATURE_CMO))
memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; int error;
ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
pool_count[i], pool_size[i],
pool_active[i]);
error = kobject_init_and_add(kobj, &ktype_veth_pool,
&dev->dev.kobj, "pool%d", i); if (!error)
kobject_uevent(kobj, KOBJ_ADD);
}
rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(),
IBMVETH_DEFAULT_QUEUES)); if (rc) {
netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
rc);
free_netdev(netdev); return rc;
}
adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE); for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
adapter->tx_ltb_ptr[i] = NULL;
/*
 * NOTE(review): fragment - veth_pool_store is cut off after the active-pool
 * validation; the old* locals are read before any visible initialization
 * and the 'unlock_err' label is not in view. Not compilable as shown.
 */
/** * veth_pool_store - sysfs store handler for pool attributes * @kobj: kobject embedded in pool * @attr: attribute being changed * @buf: value being stored * @count: length of @buf in bytes * * Stores new value in pool attribute. Verifies the range of the new value for * size and buff_size. Verifies that at least one pool remains available to * receive MTU-sized packets. * * Context: Process context. * Takes and releases rtnl_mutex to ensure correct ordering of close * and open calls. * Return: * * %-EPERM - Not allowed to disabled all MTU-sized buffer pools * * %-EINVAL - New pool size or buffer size is out of range * * count - Return count for success * * other - Return value from a failed ibmveth_open call
 */ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, constchar *buf, size_t count)
{ struct ibmveth_buff_pool *pool = container_of(kobj, struct ibmveth_buff_pool,
kobj); struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent)); struct ibmveth_adapter *adapter = netdev_priv(netdev); long value = simple_strtol(buf, NULL, 10); bool change = false;
u32 newbuff_size;
u32 oldbuff_size; int newactive; int oldactive;
u32 newsize;
u32 oldsize; long rc;
if (attr == &veth_active_attr) { if (value && !oldactive) {
newactive = 1;
change = true;
} elseif (!value && oldactive) { int mtu = netdev->mtu + IBMVETH_BUFF_OH; int i; /* Make sure there is a buffer pool with buffers that
can hold a packet of the size of the MTU */ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { if (pool == &adapter->rx_buff_pool[i]) continue; if (!adapter->rx_buff_pool[i].active) continue; if (mtu <= adapter->rx_buff_pool[i].buff_size) break;
}
if (i == IBMVETH_NUM_BUFF_POOLS) {
netdev_err(netdev, "no active pool >= MTU\n");
rc = -EPERM; goto unlock_err;
}
/**
 * ibmveth_reset_kunit - reset routine for running in KUnit environment
 *
 * @w: pointer to work_struct embedded in adapter structure
 *
 * Context: Called in the KUnit environment. Does nothing beyond emitting
 *          debug messages, so tests can schedule adapter->work safely.
 *
 * Return: void
 */
static void ibmveth_reset_kunit(struct work_struct *w)
{
	netdev_dbg(NULL, "reset_kunit starting\n");
	netdev_dbg(NULL, "reset_kunit complete\n");
}
/*
 * NOTE(review): fragment - both KUnit test bodies below are cut off after
 * their buffer-pool initialization loops; the assertions they document
 * (error returns, WARN_ON triggers) are missing from this view. Not
 * compilable as shown; restore from the upstream driver.
 */
/** * ibmveth_remove_buffer_from_pool_test - unit test for some of * ibmveth_remove_buffer_from_pool * @test: pointer to kunit structure * * Tests the error returns from ibmveth_remove_buffer_from_pool. * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be * checked to see that these warnings happened. * * Return: void
 */ staticvoid ibmveth_remove_buffer_from_pool_test(struct kunit *test)
{ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); struct ibmveth_buff_pool *pool;
u64 correlator;
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
INIT_WORK(&adapter->work, ibmveth_reset_kunit);
/* Set sane values for buffer pools */ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
pool_count[i], pool_size[i],
pool_active[i]);
/** * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer * @test: pointer to kunit structure * * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for * the NULL returns, so dmesg should be checked to see that these warnings * happened. * * Return: void
 */ staticvoid ibmveth_rxq_get_buffer_test(struct kunit *test)
{ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL); struct ibmveth_buff_pool *pool;
/* Set sane values for buffer pools */ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
pool_count[i], pool_size[i],
pool_active[i]);
/*
 * NOTE(review): the text below is extraction residue from a German web page
 * (a generic content disclaimer about syntax highlighting being
 * experimental) and is not part of the driver source. It is preserved
 * verbatim but fenced in a comment so it cannot be misread as code:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */