// SPDX-License-Identifier: GPL-2.0-or-later /* * USB Network driver infrastructure * Copyright (C) 2000-2005 by David Brownell * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
*/
/* * This is a generic "USB networking" framework that works with several * kinds of full and high speed networking devices: host-to-host cables, * smart usb peripherals, and actual Ethernet adapters. * * These devices usually differ in terms of control protocols (if they * even have one!) and sometimes they define new framing to wrap or batch * Ethernet packets. Otherwise, they talk to USB pretty much the same, * so interface (un)binding, endpoint I/O queues, fault handling, and other * issues can usefully be addressed by this framework.
*/
/* * Nineteen USB 1.1 max size bulk transactions per frame (ms), max. * Several dozen bytes of IPv4 data can fit in two such transactions. * One maximum size Ethernet packet takes twenty four of them. * For high speed, each frame comfortably fits almost 36 max size * Ethernet packets (so queues should be bigger). * * The goal is to let the USB host controller be busy for 5msec or * more before an irq is required, under load. Jumbograms change * the equation.
*/ #define MAX_QUEUE_MEMORY (60 * 1518) #define RX_QLEN(dev) ((dev)->rx_qlen) #define TX_QLEN(dev) ((dev)->tx_qlen)
// reawaken network queue this soon after stopping; else watchdog barks #define TX_TIMEOUT_JIFFIES (5*HZ)
/* throttle rx/tx briefly after some faults, so hub_wq might disconnect() * us (it polls at HZ/4 usually) before we report too many false errors.
*/ #define THROTTLE_JIFFIES (HZ/8)
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");
in = out = status = NULL;
alt = intf->altsetting + tmp;
/* take the first altsetting with in-bulk + out-bulk; * remember any status endpoint, just in case; * ignore other endpoints and altsettings.
*/ for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { struct usb_host_endpoint *e; int intr = 0;
e = alt->endpoint + ep;
/* ignore endpoints which cannot transfer data */ if (!usb_endpoint_maxp(&e->desc)) continue;
switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_INT: if (!usb_endpoint_dir_in(&e->desc)) continue;
intr = 1;
fallthrough; case USB_ENDPOINT_XFER_BULK: break; default: continue;
} if (usb_endpoint_dir_in(&e->desc)) { if (!intr && !in)
in = e; elseif (intr && !status)
status = e;
} else { if (!out)
out = e;
}
} if (in && out) break;
} if (!alt || !in || !out) return -EINVAL;
/* Fetch the MAC address from the device's string descriptor
 * (index iMACAddress) and program it as the interface hardware
 * address.
 *
 * The string must decode to exactly 12 hex digits (ETH_ALEN bytes);
 * anything else is treated as a bad descriptor.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	u8		addr[ETH_ALEN];
	int		tmp = -1, ret;
	unsigned char	buf[13];

	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	/* only a full 12-character string is acceptable */
	if (ret == 12)
		tmp = hex2bin(addr, buf, 6);
	if (tmp < 0) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		/* usb_string() succeeded but the content was bogus */
		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}
	eth_hw_addr_set(dev->net, addr);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
staticbool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device *net)
{ /* Point to point devices which don't have a real MAC address * (or report a fake local one) have historically used the usb%d * naming. Preserve this..
*/ return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 &&
(is_zero_ether_addr(net->dev_addr) ||
is_local_ether_addr(net->dev_addr));
}
staticvoid intr_complete (struct urb *urb)
{ struct usbnet *dev = urb->context; int status = urb->status;
/* NOTE: not throttling like RX/TX, since this endpoint * already polls infrequently
*/ default:
netdev_dbg(dev->net, "intr status %d\n", status); break;
}
status = usb_submit_urb (urb, GFP_ATOMIC); if (status != 0)
netif_err(dev, timer, dev->net, "intr resubmit --> %d\n", status);
}
/* Submit the interrupt URB if not previously submitted, increasing refcount */
int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	WARN_ON_ONCE(dev->interrupt == NULL);
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);

		/* only the first submitter actually starts the URB */
		if (++dev->interrupt_count == 1)
			ret = usb_submit_urb(dev->interrupt, mem_flags);

		/* NOTE(review): the unlock/return tail of this function was
		 * truncated in this copy and has been reconstructed --
		 * verify against the original.
		 */
		mutex_unlock(&dev->interrupt_mutex);
	}
	return ret;
}
/* For resume; submit interrupt URB if previously submitted */
static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	mutex_lock(&dev->interrupt_mutex);
	/* only resubmit if someone had it running before suspend */
	if (dev->interrupt_count) {
		ret = usb_submit_urb(dev->interrupt, mem_flags);
		dev_dbg(&dev->udev->dev,
			"submitted interrupt URB for resume\n");
	}
	mutex_unlock(&dev->interrupt_mutex);
	return ret;
}
/* Kill the interrupt URB if all submitters want it killed */
void usbnet_status_stop(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		WARN_ON(dev->interrupt_count == 0);

		/* last submitter out actually kills the URB */
		if (dev->interrupt_count && --dev->interrupt_count == 0)
			usb_kill_urb(dev->interrupt);

		/* NOTE(review): the unlock/close tail of this function was
		 * truncated in this copy and has been reconstructed --
		 * verify against the original.
		 */
		mutex_unlock(&dev->interrupt_mutex);
	}
}
/* For suspend; always kill interrupt URB */
static void __usbnet_status_stop_force(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		usb_kill_urb(dev->interrupt);
		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
		mutex_unlock(&dev->interrupt_mutex);
	}
}
/* Passes this packet up the stack, updating its accounting. * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb.
*/ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats); unsignedlong flags; int status;
if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
skb_queue_tail(&dev->rxq_pause, skb); return;
}
/* only update if unset to allow minidriver rx_fixup override */ if (skb->protocol == 0)
skb->protocol = eth_type_trans (skb, dev->net);
status = netif_rx (skb); if (status != NET_RX_SUCCESS)
netif_dbg(dev, rx_err, dev->net, "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
/* must be called if hard_mtu or rx_urb_size changed */
void usbnet_update_max_qlen(struct usbnet *dev)
{
	enum usb_device_speed speed = dev->udev->speed;

	/* guard against division by zero below */
	if (!dev->rx_urb_size || !dev->hard_mtu)
		goto insanity;

	switch (speed) {
	case USB_SPEED_HIGH:
		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		/*
		 * Not take default 5ms qlen for super speed HC to
		 * save memory, and iperf tests show 2.5ms qlen can
		 * work well
		 */
		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	default:
insanity:
		/* conservative fallback for unknown speeds / bad sizes */
		dev->rx_qlen = dev->tx_qlen = 4;
	}
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
/*------------------------------------------------------------------------- * * Network Device Driver (peer link to "Host Device", from USB host) *
*-------------------------------------------------------------------------*/
int usbnet_change_mtu (struct net_device *net, int new_mtu)
{ struct usbnet *dev = netdev_priv(net); int ll_mtu = new_mtu + net->hard_header_len; int old_hard_mtu = dev->hard_mtu; int old_rx_urb_size = dev->rx_urb_size;
// no second zero-length packet read wanted after mtu-sized packets if ((ll_mtu % dev->maxpacket) == 0) return -EDOM;
WRITE_ONCE(net->mtu, new_mtu);
/* defer_bh() is never called with list == &dev->done. * spin_lock_nested() tells lockdep that it is OK to take * dev->done.lock here with list->lock held.
*/
spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	/* don't bother queueing work for a device on its way out */
	if (!usbnet_going_away(dev)) {
		if (!schedule_work(&dev->kevent))
			netdev_dbg(dev->net,
				   "kevent %s may have been dropped\n",
				   usbnet_event_names[work]);
		else
			netdev_dbg(dev->net, "kevent %s scheduled\n",
				   usbnet_event_names[work]);
	}
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
staticinlineint rx_process(struct usbnet *dev, struct sk_buff *skb)
{ if (dev->driver_info->rx_fixup &&
!dev->driver_info->rx_fixup (dev, skb)) { /* With RX_ASSEMBLE, rx_fixup() must update counters */ if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
dev->net->stats.rx_errors++; return -EPROTO;
} // else network stack removes extra byte if we forced a short packet
/* all data was already cloned from skb inside the driver */ if (dev->driver_info->flags & FLAG_MULTI_PACKET) return -EALREADY;
skb_put (skb, urb->actual_length);
state = rx_done;
entry->urb = NULL;
switch (urb_status) { /* success */ case 0: break;
/* stalls need manual reset. this is rare ... except that * when going through USB 2.0 TTs, unplug appears this way. * we avoid the highspeed version of the ETIMEDOUT/EILSEQ * storm, recovering as needed.
*/ case -EPIPE:
dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
fallthrough;
/* we get controller i/o faults during hub_wq disconnect() delays. * throttle down resubmits, to avoid log floods; just temporarily, * so we still recover when the fault isn't a hub_wq delay.
*/ case -EPROTO: case -ETIME: case -EILSEQ:
dev->net->stats.rx_errors++; if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
netif_dbg(dev, link, dev->net, "rx throttle %d\n", urb_status);
}
block:
state = rx_cleanup;
entry->urb = urb;
urb = NULL; break;
/* data overrun ... flush fifo? */ case -EOVERFLOW:
dev->net->stats.rx_over_errors++;
fallthrough;
default:
state = rx_cleanup;
dev->net->stats.rx_errors++;
netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break;
}
/* stop rx if packet error rate is high */ if (++dev->pkt_cnt > 30) {
dev->pkt_cnt = 0;
dev->pkt_err = 0;
} else { if (state == rx_cleanup)
dev->pkt_err++; if (dev->pkt_err > 20)
set_bit(EVENT_RX_KILL, &dev->flags);
}
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) { if (netif_running (dev->net) &&
!test_bit (EVENT_RX_HALT, &dev->flags) &&
state != unlink_start) {
rx_submit (dev, urb, GFP_ATOMIC);
usb_mark_last_busy(dev->udev); return;
}
usb_free_urb (urb);
}
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
/* * Get reference count of the URB to avoid it to be * freed during usb_unlink_urb, which may trigger * use-after-free problem inside usb_unlink_urb since * usb_unlink_urb is always racing with .complete * handler(include defer_bh).
*/
usb_get_urb(urb);
spin_unlock_irqrestore(&q->lock, flags); // during some PM-driven resume scenarios, // these (async) unlinks complete immediately
retval = usb_unlink_urb (urb); if (retval != -EINPROGRESS && retval != 0)
netdev_dbg(dev->net, "unlink urb err, %d\n", retval); else
count++;
usb_put_urb(urb);
spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags); return count;
}
// Flush all pending rx urbs // minidrivers may need to do this when the MTU changes
// precondition: never called in_interrupt staticvoid usbnet_terminate_urbs(struct usbnet *dev)
{
DECLARE_WAITQUEUE(wait, current); int temp;
/* ensure there are no more active urbs */
add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
/* maybe wait for deletions to finish. */
wait_skb_queue_empty(&dev->rxq);
wait_skb_queue_empty(&dev->txq);
wait_skb_queue_empty(&dev->done);
netif_dbg(dev, ifdown, dev->net, "waited for %d urb completions\n", temp);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{ struct usbnet *dev = netdev_priv(net); conststruct driver_info *info = dev->driver_info; int retval, pm, mpn;
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->bh_work);
cancel_work_sync(&dev->kevent);
/* We have cyclic dependencies. Those calls are needed * to break a cycle. We cannot fall into the gaps because * we have a flag
*/
cancel_work_sync(&dev->bh_work);
timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->kevent);
if (!pm)
usb_autopm_put_interface(dev->intf);
if (info->manage_power && mpn)
info->manage_power(dev, 0); else
usb_autopm_put_interface(dev->intf);
/* ethtool methods; minidrivers may need to add some more, but * they'll probably want to use this base set.
*/
/* These methods are written on the assumption that the device * uses MII
*/ int usbnet_get_link_ksettings_mii(struct net_device *net, struct ethtool_link_ksettings *cmd)
{ struct usbnet *dev = netdev_priv(net);
/* the assumption that speed is equal on tx and rx * is deeply engrained into the networking layer. * For wireless stuff it is not true. * We assume that rx_speed matters more.
*/ if (dev->rx_speed != SPEED_UNSET)
cmd->base.speed = dev->rx_speed / 1000000; elseif (dev->tx_speed != SPEED_UNSET)
cmd->base.speed = dev->tx_speed / 1000000; else
cmd->base.speed = SPEED_UNKNOWN;
/* The standard "Universal Serial Bus Class Definitions * for Communications Devices v1.2" does not specify * anything about duplex status. * So set it DUPLEX_UNKNOWN instead of default DUPLEX_HALF.
*/
cmd->base.duplex = DUPLEX_UNKNOWN;
staticvoid __handle_link_change(struct usbnet *dev)
{ if (!test_bit(EVENT_DEV_OPEN, &dev->flags)) return;
if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags))
netif_carrier_on(dev->net);
if (!netif_carrier_ok(dev->net)) { /* kill URBs for reading packets to save bus bandwidth */
unlink_urbs(dev, &dev->rxq);
/* * tx_timeout will unlink URBs for sending packets and * tx queue is stopped by netcore after link becomes off
*/
} else { /* submitting URBs for reading packets */
queue_work(system_bh_wq, &dev->bh_work);
}
/* hard_mtu or rx_urb_size may change during link change */
usbnet_update_max_qlen(dev);
/* Run the minidriver's deferred rx-mode update, then acknowledge the
 * request by clearing the event flag.
 */
static void __handle_set_rx_mode(struct usbnet *dev)
{
	if (dev->driver_info->set_rx_mode)
		(dev->driver_info->set_rx_mode)(dev);

	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
}
/* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued.
*/ staticvoid
usbnet_deferred_kevent (struct work_struct *work)
{ struct usbnet *dev =
container_of(work, struct usbnet, kevent); int status;
/* usb_clear_halt() needs a thread context */ if (test_bit (EVENT_TX_HALT, &dev->flags)) {
unlink_urbs (dev, &dev->txq);
status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe;
status = usb_clear_halt (dev->udev, dev->out);
usb_autopm_put_interface(dev->intf); if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) { if (netif_msg_tx_err (dev))
fail_pipe:
netdev_err(dev->net, "can't clear tx halt, status %d\n",
status);
} else {
clear_bit (EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN)
netif_wake_queue (dev->net);
}
} if (test_bit (EVENT_RX_HALT, &dev->flags)) {
unlink_urbs (dev, &dev->rxq);
status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt;
status = usb_clear_halt (dev->udev, dev->in);
usb_autopm_put_interface(dev->intf); if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) { if (netif_msg_rx_err (dev))
fail_halt:
netdev_err(dev->net, "can't clear rx halt, status %d\n",
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags); if (!usbnet_going_away(dev))
queue_work(system_bh_wq, &dev->bh_work);
}
}
/* work could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; int resched = 1;
if (netif_running (dev->net))
urb = usb_alloc_urb (0, GFP_KERNEL); else
clear_bit (EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) {
clear_bit (EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf); if (status < 0) {
usb_free_urb(urb); goto fail_lowmem;
} if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
fail_lowmem: if (resched) if (!usbnet_going_away(dev))
queue_work(system_bh_wq, &dev->bh_work);
}
}
if (test_bit (EVENT_LINK_RESET, &dev->flags)) { conststruct driver_info *info = dev->driver_info; int retval = 0;
unlink_urbs (dev, &dev->txq);
queue_work(system_bh_wq, &dev->bh_work); /* this needs to be handled individually because the generic layer * doesn't know what is sufficient and could not restore private * information if a remedy of an unconditional reset were used.
*/ if (dev->driver_info->recover)
(dev->driver_info->recover)(dev);
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
/* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. * NOTE2: CDC NCM specification is different from CDC ECM when * handling ZLP/short packets, so cdc_ncm driver will make short * packet itself if needed.
*/ if (length % dev->maxpacket == 0) { if (!(info->flags & FLAG_SEND_ZLP)) { if (!(info->flags & FLAG_MULTI_PACKET)) {
length++; if (skb_tailroom(skb) && !urb->num_sgs) {
skb->data[skb->len] = 0;
__skb_put(skb, 1);
} elseif (urb->num_sgs)
sg_set_buf(&urb->sg[urb->num_sgs++],
dev->padding_pkt, 1);
}
} else
urb->transfer_flags |= URB_ZERO_PACKET;
}
urb->transfer_buffer_length = length;
if (info->flags & FLAG_MULTI_PACKET) { /* Driver has set number of packets and a length delta. * Calculate the complete length and ensure that it's * positive.
*/
entry->length += length; if (WARN_ON_ONCE(entry->length <= 0))
entry->length = length;
} else {
usbnet_set_skb_tx_stats(skb, 1, length);
}
#ifdef CONFIG_PM /* if this triggers the device is still a sleep */ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { /* transmission will be done in resume */
usb_anchor_urb(urb, &dev->deferred); /* no use to process more packets */
netif_stop_queue(net);
usb_put_urb(urb);
spin_unlock_irqrestore(&dev->txq.lock, flags);
netdev_dbg(dev->net, "Delaying transmission for resumption\n"); goto deferred;
} #endif
if (retval) {
netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
dev->net->stats.tx_dropped++;
not_drop: if (skb)
dev_kfree_skb_any (skb); if (urb) {
kfree(urb->sg);
usb_free_urb(urb);
}
} else
netif_dbg(dev, tx_queued, dev->net, "> tx, len %u, type 0x%x\n", length, skb->protocol); #ifdef CONFIG_PM
deferred: #endif return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);
/* Allocate and submit up to ten RX URBs (fewer if the queue is already
 * near RX_QLEN).  Returns 0 on success or the first allocation or
 * submission error.
 */
static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;
	int		ret = 0;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			ret = rx_submit(dev, urb, flags);
			if (ret)
				goto err;
		} else {
			ret = -ENOMEM;
			goto err;
		}
	}
err:
	return ret;
}
while ((skb = skb_dequeue (&dev->done))) {
entry = (struct skb_data *) skb->cb; switch (entry->state) { case rx_done: if (rx_process(dev, skb))
usb_free_skb(skb); continue; case tx_done:
kfree(entry->urb->sg);
fallthrough; case rx_cleanup:
usb_free_skb(skb); continue; default:
netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
}
}
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
/* waiting for all pending urbs to complete? * only then can we forgo submitting anew
*/ if (waitqueue_active(&dev->wait)) { if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} elseif (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
!usbnet_going_away(dev) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) { if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK) return; if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net, "rxqlen %d --> %d\n",
temp, dev->rxq.qlen); if (dev->rxq.qlen < RX_QLEN(dev))
queue_work(system_bh_wq, &dev->bh_work);
} if (dev->txq.qlen < TX_QLEN (dev))
netif_wake_queue (dev->net);
}
}
/*------------------------------------------------------------------------- * * USB Device Driver support *
*-------------------------------------------------------------------------*/
/* usbnet already took usb runtime pm, so have to enable the feature * for usb interface, otherwise usb_autopm_get_interface may return * failure if RUNTIME_PM is enabled.
*/ if (!driver->supports_autosuspend) {
driver->supports_autosuspend = 1;
pm_runtime_enable(&udev->dev);
}
name = udev->dev.driver->name;
info = (conststruct driver_info *) prod->driver_info; if (!info) {
dev_dbg (&udev->dev, "blacklisted by %s\n", name); return -ENODEV;
}
xdev = interface_to_usbdev (udev);
interface = udev->cur_altsetting;
status = -ENOMEM;
// set up our own records
net = alloc_etherdev(sizeof(*dev)); if (!net) goto out;
/* netdev_printk() needs this so do it as early as possible */
SET_NETDEV_DEV(net, &udev->dev);
/* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case.
*/
dev->hard_mtu = net->mtu + net->hard_header_len;
net->min_mtu = 0;
net->max_mtu = ETH_MAX_MTU;
// allow device-specific bind/init procedures // NOTE net->name still not usable ... if (info->bind) {
status = info->bind (dev, udev); if (status < 0) goto out1;
/* heuristic: rename to "eth%d" if we are not sure this link * is two-host (these links keep "usb%d")
*/ if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
!usbnet_needs_usb_name_format(dev, net))
strscpy(net->name, "eth%d", sizeof(net->name)); /* WLAN devices should always be named "wlan%d" */ if ((dev->driver_info->flags & FLAG_WLAN) != 0)
strscpy(net->name, "wlan%d", sizeof(net->name)); /* WWAN devices should always be named "wwan%d" */ if ((dev->driver_info->flags & FLAG_WWAN) != 0)
strscpy(net->name, "wwan%d", sizeof(net->name));
/* devices that cannot do ARP */ if ((dev->driver_info->flags & FLAG_NOARP) != 0)
net->flags |= IFF_NOARP;
/* maybe the remote can't receive an Ethernet MTU */ if (net->mtu > (dev->hard_mtu - net->hard_header_len))
net->mtu = dev->hard_mtu - net->hard_header_len;
} elseif (!info->in || !info->out)
status = usbnet_get_endpoints (dev, udev); else {
u8 ep_addrs[3] = {
info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
};
dev->in = usb_rcvbulkpipe (xdev, info->in);
dev->out = usb_sndbulkpipe (xdev, info->out); if (!(info->flags & FLAG_NO_SETINT))
status = usb_set_interface (xdev,
interface->desc.bInterfaceNumber,
interface->desc.bAlternateSetting); else
status = 0;
if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
status = -EINVAL;
} if (status >= 0 && dev->status)
status = init_status (dev, udev); if (status < 0) goto out3;
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket(dev->udev, dev->out); if (dev->maxpacket == 0) { /* that is a broken device */
status = -ENODEV; goto out4;
}
/* this flags the device for user space */ if (!is_valid_ether_addr(net->dev_addr))
eth_hw_addr_random(net);
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
SET_NETDEV_DEVTYPE(net, &wlan_type); if ((dev->driver_info->flags & FLAG_WWAN) != 0)
SET_NETDEV_DEVTYPE(net, &wwan_type);
/* initialize max rx_qlen and tx_qlen */
usbnet_update_max_qlen(dev);
if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
!(info->flags & FLAG_MULTI_PACKET)) {
dev->padding_pkt = kzalloc(1, GFP_KERNEL); if (!dev->padding_pkt) {
status = -ENOMEM; goto out4;
}
}
status = register_netdev (net); if (status) goto out5;
netif_info(dev, probe, dev->net, "register '%s' at usb-%s-%s, %s, %pM\n",
udev->dev.driver->name,
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description,
net->dev_addr);
// ok, it's ready to go.
usb_set_intfdata (udev, dev);
netif_device_attach (net);
if (dev->driver_info->flags & FLAG_LINK_INTR)
usbnet_link_change(dev, 0, 0);
return 0;
out5:
kfree(dev->padding_pkt);
out4:
usb_free_urb(dev->interrupt);
out3: if (info->unbind)
info->unbind (dev, udev);
out1: /* subdrivers must undo all they did in bind() if they * fail it, but we may fail later and a deferred kevent * may trigger an error resubmitting itself and, worse, * schedule a timer. So we kill it all just in case.
*/
usbnet_mark_going_away(dev);
cancel_work_sync(&dev->kevent);
timer_delete_sync(&dev->delay);
free_netdev(net);
out: return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { /* handle remote wakeup ASAP * we cannot race against stop
*/ if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
queue_work(system_bh_wq, &dev->bh_work);
}
}
if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
usb_autopm_get_interface_no_resume(intf);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);
/*
 * Either a subdriver implements manage_power, then it is assumed to always
 * be ready to be suspended or it reports the readiness to be suspended
 * explicitly
 */
void usbnet_device_suggests_idle(struct usbnet *dev)
{
	/* only act on the first report; the bit stays set afterwards */
	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
		dev->intf->needs_remote_wakeup = 1;
		usb_autopm_put_interface_async(dev->intf);
	}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);
/*
 * For devices that can do without special commands
 */
int usbnet_manage_power(struct usbnet *dev, int on)
{
	dev->intf->needs_remote_wakeup = on;
	return 0;
}
EXPORT_SYMBOL(usbnet_manage_power);
/* Record a link-state report from the minidriver.  The actual handling
 * (carrier on, queue restart, reset) is deferred to keventd.
 */
void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
{
	/* update link after link is reseted */
	if (link && !need_reset) {
		set_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
	} else {
		clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
		netif_carrier_off(dev->net);
	}

	/* NOTE(review): the tail of this function was truncated in this
	 * copy; upstream defers the rest of the handling via
	 * EVENT_LINK_CHANGE -- restored, verify against the original.
	 */
	usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
/*
 * The function can't be called inside suspend/resume callback,
 * otherwise deadlock will be caused.
 */
int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		    u16 value, u16 index, void *data, u16 size)
{
	int ret;

	/* hold a PM reference for the duration of the transfer */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;

	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				data, size);

	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd);
/*
 * The function can't be called inside suspend/resume callback,
 * otherwise deadlock will be caused.
 */
int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		     u16 value, u16 index, const void *data, u16 size)
{
	int ret;

	/* hold a PM reference for the duration of the transfer */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;

	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				 data, size);

	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd);
/*
 * The function can be called inside suspend/resume callback safely
 * and should only be called by suspend/resume callback generally.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			 u16 value, u16 index, void *data, u16 size)
{
	/* no autopm here: callers already hold the PM context */
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
/*
 * The function can be called inside suspend/resume callback safely
 * and should only be called by suspend/resume callback generally.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	/* no autopm here: callers already hold the PM context */
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
/* Completion handler for asynchronous control transfers: log failures
 * and release the control request and URB allocated by the submitter.
 */
static void usbnet_async_cmd_cb(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		/* kernel log messages end with a newline */
		dev_dbg(&urb->dev->dev, "%s failed with %d\n",
			__func__, status);

	kfree(req);
	usb_free_urb(urb);
}
/* * The caller must make sure that device can't be put into suspend * state until the control URB completes.
*/ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, constvoid *data, u16 size)
{ struct usb_ctrlrequest *req; struct urb *urb; int err = -ENOMEM; void *buf = NULL;
fail_free_all:
kfree(req);
fail_free_buf:
kfree(buf); /* * avoid a double free * needed because the flag can be set only * after filling the URB
*/
urb->transfer_flags = 0;
fail_free_urb:
usb_free_urb(urb);
fail: return err;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.