/* DMA_INFO field at the beginning of the segment contains only some of * the information, we need to read the FCE descriptor from the end.
*/
fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
seg_len -= MT_FCE_INFO_LEN;
data += MT_DMA_HDR_LEN;
seg_len -= MT_DMA_HDR_LEN;
if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
dev_err_once(dev->dev, "Error: RXWI zero fields are set\n"); if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
/* do no schedule rx tasklet if urb has been unlinked * or the device has been removed
*/ switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: case -ENOENT: case -EPROTO: return; default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
urb->status);
fallthrough; case 0: break;
}
switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: case -ENOENT: case -EPROTO: return; default:
dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
urb->status);
fallthrough; case 0: break;
}
if (WARN_ON(q->entries <= q->used)) {
ret = -ENOSPC; goto out;
}
e = &q->e[q->end];
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC); if (ret) { /* Special-handle ENODEV from TX urb submission because it will * often be the first ENODEV we see after device is removed.
*/ if (ret == -ENODEV)
set_bit(MT7601U_STATE_REMOVED, &dev->state); else
dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
ret); goto out;
}
for (i = 0; i < N_RX_ENTRIES; i++) {
dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) return -ENOMEM;
}
return 0;
}
/* Release all resources held by a TX queue: poison each URB so its
 * completion handler cannot run against freed state, report any skb
 * still attached to an entry as completed, then free the URB itself.
 */
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		/* Poison before touching the skb so the URB cannot
		 * complete (and consume the skb) concurrently.
		 */
		usb_poison_urb(q->e[i].urb);
		if (q->e[i].skb)
			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);
	}
}
/* Free every per-endpoint TX queue. Safe to call before the queue
 * array was allocated (dev->tx_q may still be NULL on early teardown).
 * The array itself is devm-managed, so only the queue contents are
 * released here.
 */
static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}
/* Initialize one TX queue and preallocate a URB for each of its
 * N_TX_ENTRIES slots.
 *
 * Returns 0 on success or -ENOMEM if any URB allocation fails; URBs
 * allocated before the failure are left in place for the caller's
 * cleanup path (mt7601u_free_tx_queue) to release.
 */
static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}
/* Allocate the per-endpoint TX queue array (device-managed, so it is
 * freed automatically on driver detach) and populate each queue's URBs.
 *
 * Returns 0 on success or -ENOMEM; on partial failure, queues already
 * populated are expected to be torn down by the caller's error path.
 */
static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}
int mt7601u_dma_init(struct mt7601u_dev *dev)
{ int ret;
	/* NOTE(review): extraction residue — the following is a German
	 * website disclaimer that replaced the original function body and
	 * does not belong in this file. Translated for reference:
	 * "The information on this website has been carefully compiled to
	 * the best of our knowledge. However, neither completeness, nor
	 * correctness, nor quality of the provided information is
	 * guaranteed. Note: the colored syntax highlighting and the
	 * measurement are still experimental."
	 * The real body of mt7601u_dma_init() must be restored from the
	 * upstream source.
	 */