// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/ethtool.c - Ethtool ioctl handler
 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
 *
 * This file is where we call all the ethtool_ops commands to get
 * the information ethtool needs.
 */
/* State held across locks and calls for commands which have devlink fallback */
struct ethtool_devlink_compat {
	struct devlink *devlink;	/* devlink instance to fall back to */
	union {
		/* per-command payload; only one member is live at a time */
		struct ethtool_flash efl;
		struct ethtool_drvinfo info;
	};
};
/*
 * Some useful ethtool_ops methods that're device independent.
 * If we find that all drivers want to do the same thing here,
 * we can turn these into dev_() function calls.
 */
/* Generic get_link implementation: report the carrier state maintained by
 * the net core.  Usable by any driver that keeps netif_carrier up to date.
 */
u32 ethtool_op_get_link(struct net_device *dev)
{
	/* Synchronize carrier state with link watch, see also rtnl_getlink() */
	__linkwatch_sync_dev(dev);

	return netif_carrier_ok(dev) ? 1 : 0;
}
if (data & ETH_FLAG_LRO)
features |= NETIF_F_LRO; if (data & ETH_FLAG_RXVLAN)
features |= NETIF_F_HW_VLAN_CTAG_RX; if (data & ETH_FLAG_TXVLAN)
features |= NETIF_F_HW_VLAN_CTAG_TX; if (data & ETH_FLAG_NTUPLE)
features |= NETIF_F_NTUPLE; if (data & ETH_FLAG_RXHASH)
features |= NETIF_F_RXHASH;
/* allow changing only bits set in hw_features */
changed = (features ^ dev->features) & ETH_ALL_FEATURES; if (changed & ~dev->hw_features) return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
/* Given two link masks, AND them together and save the result in dst. */ void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, struct ethtool_link_ksettings *src)
{ unsignedint size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); unsignedint idx = 0;
/* number of 32-bit words to store the user's link mode bitmaps */ #define __ETHTOOL_LINK_MODE_MASK_NU32 \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
/* convert ethtool_link_usettings in user space to a kernel internal * ethtool_link_ksettings. return 0 on success, errno on error.
*/ staticint load_link_ksettings_from_user(struct ethtool_link_ksettings *to, constvoid __user *from)
{ struct ethtool_link_usettings link_usettings;
if (copy_from_user(&link_usettings, from, sizeof(link_usettings))) return -EFAULT;
/* Check if the user is trying to change anything besides speed/duplex */ bool ethtool_virtdev_validate_cmd(conststruct ethtool_link_ksettings *cmd)
{ struct ethtool_link_settings base2 = {};
if (__ETHTOOL_LINK_MODE_MASK_NU32
!= link_ksettings.base.link_mode_masks_nwords) { /* wrong link mode nbits requested */
memset(&link_ksettings, 0, sizeof(link_ksettings));
link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS; /* send back number of words required as negative val */
compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX, "need too many bits for link modes!");
link_ksettings.base.link_mode_masks_nwords
= -((s8)__ETHTOOL_LINK_MODE_MASK_NU32);
/* copy the base fields back to user, not the link * mode bitmaps
*/ if (copy_to_user(useraddr, &link_ksettings.base, sizeof(link_ksettings.base))) return -EFAULT;
return 0;
}
/* handshake successful: user/kernel agree on * link_mode_masks_nwords
*/
/* make sure we tell the right values to user */
link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
link_ksettings.base.link_mode_masks_nwords
= __ETHTOOL_LINK_MODE_MASK_NU32;
link_ksettings.base.master_slave_cfg = MASTER_SLAVE_CFG_UNSUPPORTED;
link_ksettings.base.master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
link_ksettings.base.rate_matching = RATE_MATCH_NONE;
if (!dev->ethtool_ops->set_link_ksettings) return -EOPNOTSUPP;
/* make sure nbits field has expected value */ if (copy_from_user(&link_ksettings.base, useraddr, sizeof(link_ksettings.base))) return -EFAULT;
if (__ETHTOOL_LINK_MODE_MASK_NU32
!= link_ksettings.base.link_mode_masks_nwords) return -EINVAL;
/* copy the whole structure, now that we know it has expected * format
*/
err = load_link_ksettings_from_user(&link_ksettings, useraddr); if (err) return err;
/* re-check nwords field, just in case */ if (__ETHTOOL_LINK_MODE_MASK_NU32
!= link_ksettings.base.link_mode_masks_nwords) return -EINVAL;
if (link_ksettings.base.master_slave_cfg ||
link_ksettings.base.master_slave_state) return -EINVAL;
/* Query device for its ethtool_cmd settings.
 *
 * Backward compatibility note: for compatibility with legacy ethtool, this is
 * now implemented via get_link_ksettings. When driver reports higher link mode
 * bits, a kernel warning is logged once (with name of 1st driver/device) to
 * recommend user to upgrade ethtool, but the command is successful (only the
 * lower link mode bits reported back to user). Deprecated fields from
 * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero.
 */
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_link_ksettings link_ksettings;
	struct ethtool_cmd cmd;
	int err;

	ASSERT_RTNL();
	if (!dev->ethtool_ops->get_link_ksettings)
		return -EOPNOTSUPP;

	if (dev->ethtool->module_fw_flash_in_progress)
		return -EBUSY;

	/* Query the driver, then translate the result into the legacy
	 * ethtool_cmd layout.  The conversion fully initializes @cmd, so no
	 * uninitialized stack bytes can reach userspace below.
	 */
	memset(&link_ksettings, 0, sizeof(link_ksettings));
	err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
	if (err < 0)
		return err;
	convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);

	/* send a sensible cmd tag back to user */
	cmd.cmd = ETHTOOL_GSET;

	if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
		return -EFAULT;

	return 0;
}
/* Update device link settings with given ethtool_cmd.
 *
 * Backward compatibility note: for compatibility with legacy ethtool, this is
 * now always implemented via set_link_settings. When user's request updates
 * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
 * warning is logged once (with name of 1st driver/device) to recommend user to
 * upgrade ethtool, and the request is rejected.
 */
static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_link_ksettings link_ksettings;
	struct ethtool_cmd cmd;
	int ret;

	ASSERT_RTNL();

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	/* first, check whether the driver supports link settings updates */
	if (!dev->ethtool_ops->set_link_ksettings)
		return -EOPNOTSUPP;

	/* rejects requests touching deprecated ethtool_cmd fields */
	if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd))
		return -EINVAL;

	link_ksettings.base.link_mode_masks_nwords =
		__ETHTOOL_LINK_MODE_MASK_NU32;
	ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
	if (ret >= 0) {
		/* settings accepted: tell netlink listeners */
		ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF);
		ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF);
	}
	return ret;
}
info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER); if (!info_buf) return -ENOMEM;
/* * fill return buffer based on input bitmask and successful * get_sset_count return
*/ for (i = 0; i < 64; i++) { if (!(sset_mask & (1ULL << i))) continue;
/* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. * On non-x86, no conversion should be needed.
*/
BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) && sizeof(struct compat_ethtool_rxnfc) != sizeof(struct ethtool_rxnfc));
BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(useraddr->fs.m_ext) !=
offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext));
BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) -
offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
offsetof(struct ethtool_rxnfc, fs.location) -
offsetof(struct ethtool_rxnfc, fs.ring_cookie));
if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc)))) return -EFAULT;
if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc)))) return -EFAULT;
return 0;
}
/* Copy an ethtool_rxnfc request from userspace, honouring the shorter legacy
 * layout used by old ETHTOOL_{G,S}RXFH callers.  On success *info_size holds
 * the number of bytes actually consumed from @useraddr.
 */
static int ethtool_rxnfc_copy_struct(u32 cmd, struct ethtool_rxnfc *info,
				     size_t *info_size, void __user *useraddr)
{
	/* struct ethtool_rxnfc was originally defined for
	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
	 * members.  User-space might still be using that
	 * definition.
	 */
	if (cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH)
		*info_size = (offsetof(struct ethtool_rxnfc, data) +
			      sizeof(info->data));

	if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size))
		return -EFAULT;

	if ((cmd == ETHTOOL_GRXFH || cmd == ETHTOOL_SRXFH) &&
	    info->flow_type & FLOW_RSS) {
		/* FLOW_RSS implies the full, modern layout: re-read it */
		*info_size = sizeof(*info);
		if (ethtool_rxnfc_copy_from_user(info, useraddr, *info_size))
			return -EFAULT;
		/* Since malicious users may modify the original data,
		 * we need to check whether FLOW_RSS is still requested.
		 */
		if (!(info->flow_type & FLOW_RSS))
			return -EINVAL;
	}

	return 0;
}
if (compat_need_64bit_alignment_fixup()) {
ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size,
rule_buf);
useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
} else {
ret = copy_to_user(useraddr, rxnfc, size);
useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
}
if (ret) return -EFAULT;
if (rule_buf) { if (copy_to_user(useraddr, rule_buf,
rxnfc->rule_cnt * sizeof(u32))) return -EFAULT;
}
return 0;
}
staticbool flow_type_hashable(u32 flow_type)
{ switch (flow_type) { case ETHER_FLOW: case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IPV4_FLOW: case IPV6_FLOW: case GTPU_V4_FLOW: case GTPU_V6_FLOW: case GTPC_V4_FLOW: case GTPC_V6_FLOW: case GTPC_TEID_V4_FLOW: case GTPC_TEID_V6_FLOW: case GTPU_EH_V4_FLOW: case GTPU_EH_V6_FLOW: case GTPU_UL_V4_FLOW: case GTPU_UL_V6_FLOW: case GTPU_DL_V4_FLOW: case GTPU_DL_V6_FLOW: returntrue;
}
returnfalse;
}
/* When adding a new type, update the assert and, if it's hashable, add it to
 * the flow_type_hashable switch case.
 */
static_assert(GTPU_DL_V6_FLOW + 1 == __FLOW_TYPE_COUNT);
/* Validate that the requested input transform is compatible with the hash
 * field configuration.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh)
{
	/* Sanity check: if symmetric-xor/symmetric-or-xor is set, then:
	 * 1 - no other fields besides IP src/dst and/or L4 src/dst are set
	 * 2 - If src is set, dst must also be set
	 */
	if ((input_xfrm != RXH_XFRM_NO_CHANGE &&
	     input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) &&
	    !ethtool_rxfh_config_is_sym(rxfh))
		return -EINVAL;

	return 0;
}
rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr); if (rc) return rc;
if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS) { /* Nonzero ring with RSS only makes sense * if NIC adds them together
*/ if (!ops->cap_rss_rxnfc_adds &&
ethtool_get_flow_spec_ring(info.fs.ring_cookie)) return -EINVAL;
if (info.rss_context &&
!xa_load(&dev->ethtool->rss_ctx, info.rss_context)) return -EINVAL;
}
rc = ops->set_rxnfc(dev, &info); if (rc) return rc;
/* ETHTOOL_GRXFHINDIR: report the RSS indirection table to userspace.
 * A user buffer size of 0 is a pure size query; otherwise the buffer must be
 * at least as large as the device table.
 */
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
						     void __user *useraddr)
{
	struct ethtool_rxfh_param rxfh = {};
	u32 user_size;
	int ret;

	if (!dev->ethtool_ops->get_rxfh_indir_size ||
	    !dev->ethtool_ops->get_rxfh)
		return -EOPNOTSUPP;
	rxfh.indir_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
	if (rxfh.indir_size == 0)
		return -EOPNOTSUPP;

	if (copy_from_user(&user_size,
			   useraddr + offsetof(struct ethtool_rxfh_indir, size),
			   sizeof(user_size)))
		return -EFAULT;

	/* always report the device table size back to the caller */
	if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
			 &rxfh.indir_size, sizeof(rxfh.indir_size)))
		return -EFAULT;

	/* If the user buffer size is 0, this is just a query for the
	 * device table size.  Otherwise, if it's smaller than the
	 * device table size it's an error.
	 */
	if (user_size < rxfh.indir_size)
		return user_size == 0 ? 0 : -EINVAL;

	rxfh.indir = kcalloc(rxfh.indir_size, sizeof(rxfh.indir[0]), GFP_USER);
	if (!rxfh.indir)
		return -ENOMEM;

	mutex_lock(&dev->ethtool->rss_lock);
	ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
	mutex_unlock(&dev->ethtool->rss_lock);
	if (ret)
		goto out;
	if (copy_to_user(useraddr +
			 offsetof(struct ethtool_rxfh_indir, ring_index[0]),
			 rxfh.indir, rxfh.indir_size * sizeof(*rxfh.indir)))
		ret = -EFAULT;

out:
	/* free the table on all paths, including driver failure */
	kfree(rxfh.indir);
	return ret;
}
if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
!ops->get_rxnfc) return -EOPNOTSUPP;
rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); if (rxfh_dev.indir_size == 0) return -EOPNOTSUPP;
if (copy_from_user(&user_size,
useraddr + offsetof(struct ethtool_rxfh_indir, size), sizeof(user_size))) return -EFAULT;
if (user_size != 0 && user_size != rxfh_dev.indir_size) return -EINVAL;
rxfh_dev.indir = kcalloc(rxfh_dev.indir_size, sizeof(rxfh_dev.indir[0]), GFP_USER); if (!rxfh_dev.indir) return -ENOMEM;
rx_rings.cmd = ETHTOOL_GRXRINGS;
ret = ops->get_rxnfc(dev, &rx_rings, NULL); if (ret) goto out;
if (user_size == 0) {
u32 *indir = rxfh_dev.indir;
for (i = 0; i < rxfh_dev.indir_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
} else {
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
useraddr + ringidx_offset,
&rx_rings,
rxfh_dev.indir_size); if (ret) goto out;
}
rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE;
mutex_lock(&dev->ethtool->rss_lock);
ret = ops->set_rxfh(dev, &rxfh_dev, extack); if (ret) goto out_unlock;
/* indicate whether rxfh was set to default */ if (user_size == 0)
dev->priv_flags &= ~IFF_RXFH_CONFIGURED; else
dev->priv_flags |= IFF_RXFH_CONFIGURED;
if (ops->get_rxfh_indir_size)
rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev); if (ops->get_rxfh_key_size)
rxfh_dev.key_size = ops->get_rxfh_key_size(dev);
/* Check that reserved fields are 0 for now */ if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32) return -EINVAL; /* Most drivers don't handle rss_context, check it's 0 as well */ if (rxfh.rss_context && !ops->create_rxfh_context) return -EOPNOTSUPP;
/* Must request at least one change: indir size, hash key, function * or input transformation. * There's no need for any of it in case of context creation.
*/ if (!create &&
(rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE &&
rxfh.input_xfrm == RXH_XFRM_NO_CHANGE)) return -EINVAL;
/* Check settings which may be global rather than per RSS-context */ if (rxfh.rss_context && !ops->rxfh_per_ctx_key) if (rxfh.key_size ||
(rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) ||
(rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)) return -EOPNOTSUPP;
rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER); if (!rss_config) return -ENOMEM;
rx_rings.cmd = ETHTOOL_GRXRINGS;
ret = ops->get_rxnfc(dev, &rx_rings, NULL); if (ret) goto out_free;
/* rxfh.indir_size == 0 means reset the indir table to default (master * context) or delete the context (other RSS contexts). * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged.
*/ if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
user_indir_len = indir_bytes;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
useraddr + rss_cfg_offset,
&rx_rings,
rxfh.indir_size); if (ret) goto out_free;
} elseif (rxfh.indir_size == 0) { if (rxfh.rss_context == 0) {
u32 *indir;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
indir = rxfh_dev.indir; for (i = 0; i < dev_indir_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
} else {
rxfh_dev.rss_delete = true;
}
}
if (rxfh.key_size) {
rxfh_dev.key_size = dev_key_size;
rxfh_dev.key = rss_config + indir_bytes; if (copy_from_user(rxfh_dev.key,
useraddr + rss_cfg_offset + user_indir_len,
rxfh.key_size)) {
ret = -EFAULT; goto out_free;
}
}
mutex_lock(&dev->ethtool->rss_lock);
ret = ethtool_check_flow_types(dev, rxfh.input_xfrm); if (ret) goto out_unlock;
if (rxfh.rss_context && rxfh_dev.rss_delete) {
ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context); if (ret) goto out_unlock;
}
if (create) {
u32 limit, ctx_id;
if (rxfh_dev.rss_delete) {
ret = -EINVAL; goto out_unlock;
}
ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); if (!ctx) {
ret = -ENOMEM; goto out_unlock;
}
if (coalesce->rx_coalesce_usecs)
nonzero_params |= ETHTOOL_COALESCE_RX_USECS; if (coalesce->rx_max_coalesced_frames)
nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES; if (coalesce->rx_coalesce_usecs_irq)
nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ; if (coalesce->rx_max_coalesced_frames_irq)
nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ; if (coalesce->tx_coalesce_usecs)
nonzero_params |= ETHTOOL_COALESCE_TX_USECS; if (coalesce->tx_max_coalesced_frames)
nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES; if (coalesce->tx_coalesce_usecs_irq)
nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ; if (coalesce->tx_max_coalesced_frames_irq)
nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ; if (coalesce->stats_block_coalesce_usecs)
nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS; if (coalesce->use_adaptive_rx_coalesce)
nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX; if (coalesce->use_adaptive_tx_coalesce)
nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX; if (coalesce->pkt_rate_low)
nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW; if (coalesce->rx_coalesce_usecs_low)
nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW; if (coalesce->rx_max_coalesced_frames_low)
nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW; if (coalesce->tx_coalesce_usecs_low)
nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW; if (coalesce->tx_max_coalesced_frames_low)
nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW; if (coalesce->pkt_rate_high)
nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH; if (coalesce->rx_coalesce_usecs_high)
nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH; if (coalesce->rx_max_coalesced_frames_high)
nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH; if (coalesce->tx_coalesce_usecs_high)
nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH; if (coalesce->tx_max_coalesced_frames_high)
nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH; if (coalesce->rate_sample_interval)
nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL;
/* ensure new ring parameters are within the maximums */ if (ringparam.rx_pending > max.rx_max_pending ||
ringparam.rx_mini_pending > max.rx_mini_max_pending ||
ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending ||
ringparam.tx_pending > max.tx_max_pending) return -EINVAL;
ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
&kernel_ringparam, NULL); if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF); return ret;
}
if (copy_from_user(&id, useraddr, sizeof(id))) return -EFAULT;
rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); if (rc < 0) return rc;
/* Drop the RTNL lock while waiting, but prevent reentry or * removal of the device.
*/
busy = true;
netdev_hold(dev, &dev_tracker, GFP_KERNEL);
netdev_unlock_ops(dev);
rtnl_unlock();
if (rc == 0) { /* Driver will handle this itself */
schedule_timeout_interruptible(
id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
} else { /* Driver expects to be called at twice the frequency in rc */ int n = rc * 2, interval = HZ / n;
u64 count = mul_u32_u32(n, id.data);
u64 i = 0;
do {
rtnl_lock();
netdev_lock_ops(dev);
rc = ops->set_phys_id(dev,
(i++ & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
netdev_unlock_ops(dev);
rtnl_unlock(); if (rc) break;
schedule_timeout_interruptible(interval);
} while (!signal_pending(current) && (!id.data || i < count));
}
/* NOTE(review): the following text appears to be extraneous website
 * boilerplate (in German) accidentally appended to this source file and is
 * not part of the kernel source; it should be removed.  Translation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */