/* RX budget is initially set to the size of the MHI RX queue and is * used to limit the number of allocated and queued packets. It is * decremented on data queueing and incremented on data release.
*/ unsignedint rx_budget;
};
/* Increment RX budget and schedule RX refill if necessary */
static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
{
	spin_lock_bh(&mhiwwan->rx_lock);

	mhiwwan->rx_budget++;

	/* Only kick the refill worker while RX refilling is enabled */
	if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
		schedule_work(&mhiwwan->rx_refill);

	spin_unlock_bh(&mhiwwan->rx_lock);
}
/* Decrement RX budget if non-zero and return true on success */
static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
{
	bool ret = false;

	spin_lock_bh(&mhiwwan->rx_lock);

	if (mhiwwan->rx_budget) {
		mhiwwan->rx_budget--;
		/* Report success only while RX refilling is still enabled */
		if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
			ret = true;
	}

	spin_unlock_bh(&mhiwwan->rx_lock);

	return ret;
}
/* skb destructor: the RX buffer has been consumed by its receiver, so
 * return its slot to the allocation budget (destructor_arg holds the
 * owning mhi_wwan_dev, set when the buffer was allocated).
 */
static void __mhi_skb_destructor(struct sk_buff *skb)
{
	mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg);
}
/*
 * NOTE(review): the enclosing function header is not visible in this
 * chunk — presumably the RX refill work handler, which would supply the
 * `mhiwwan` and `mhi_dev` locals used below; confirm against the full file.
 *
 * Loop: allocate and queue one RX buffer per unit of available budget.
 */
while (mhi_wwan_rx_budget_dec(mhiwwan)) { struct sk_buff *skb;
/* Allocation failure: give the budget unit back and retry later */
skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL); if (!skb) {
mhi_wwan_rx_budget_inc(mhiwwan); break;
}
/* To prevent unlimited buffer allocation if nothing consumes * the RX buffers (passed to WWAN core), track their lifespan * to not allocate more than allowed budget.
*/
skb->destructor = __mhi_skb_destructor;
skb_shinfo(skb)->destructor_arg = mhiwwan;
/* Hand the buffer to the MHI core for DL (device-to-host) transfer */
if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) {
dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
/* Queueing failed: free the skb; its destructor restores the budget */
kfree_skb(skb); break;
}
}
}
/*
 * NOTE(review): this span appears to merge TWO function bodies whose
 * headers were lost in extraction — first the WWAN port TX op (queues an
 * skb toward the device), then, from the consume_skb() onward, a UL
 * transfer-completion callback. Confirm boundaries against the full file.
 */
/* TX is only possible if the device exposes an uplink channel */
if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags)) return -EOPNOTSUPP;
/* Queue the packet for MHI transfer and check fullness of the queue */
spin_lock_bh(&mhiwwan->tx_lock);
/* If the MHI TX queue just became full, stop further port TX */
ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
wwan_port_txoff(port);
spin_unlock_bh(&mhiwwan->tx_lock);
/* MHI core has done with the buffer, release it */
consume_skb(skb);
/* There is likely new slot available in the MHI queue, re-allow TX */
spin_lock_bh(&mhiwwan->tx_lock); if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
wwan_port_txon(port);
spin_unlock_bh(&mhiwwan->tx_lock);
}
/*
 * NOTE(review): fragment of the probe function — the header and the
 * allocation of `mhiwwan` are not visible in this chunk; confirm against
 * the full file.
 *
 * Record which directions (downlink/uplink) this MHI device supports.
 */
if (mhi_dev->dl_chan)
set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags); if (mhi_dev->ul_chan)
set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags);
dev_set_drvdata(&mhi_dev->dev, mhiwwan);
/* Register as a wwan port, id->driver_data contains wwan port type */
port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
&wwan_pops, NULL, mhiwwan); if (IS_ERR(port)) {
/* Port creation failed: free the device context and propagate the error */
kfree(mhiwwan); return PTR_ERR(port);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.