/* if allocation did fail upper functions still may call us */ if (unlikely(!bch->rx_skb)) return; if (unlikely(!bch->rx_skb->len)) { /* we have no data to send - this may happen after recovery * from overflow or too small allocation.
* We need to free the buffer here */
dev_kfree_skb(bch->rx_skb);
bch->rx_skb = NULL;
} else { if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
(bch->rx_skb->len < bch->minlen) && !force) return;
hh = mISDN_HEAD_P(bch->rx_skb);
hh->prim = PH_DATA_IND;
hh->id = id; if (bch->rcount >= 64) {
printk(KERN_WARNING "B%d receive queue overflow - flushing!\n",
bch->nr);
skb_queue_purge(&bch->rqueue);
}
bch->rcount++;
skb_queue_tail(&bch->rqueue, bch->rx_skb);
bch->rx_skb = NULL;
schedule_event(bch, FLG_RECVQUEUE);
}
}
EXPORT_SYMBOL(recv_Bchannel);
/* Queue or start transmission of a D-channel frame.
 * Returns 1 if the frame was handed to the fifo writer immediately,
 * 0 if it was queued behind a transmission in progress, or a negative
 * errno on an invalid frame size.
 */
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* validate the frame size before touching channel state */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (!test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		/* transmitter was idle - write to fifo right away */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
	/* transmitter busy - park the frame on the send queue */
	skb_queue_tail(&ch->squeue, skb);
	return 0;
}
EXPORT_SYMBOL(dchannel_senddata);
/* Queue or start transmission of a B-channel frame.
 * Returns 1 if transmission was started immediately (with confirm),
 * 0 if the frame was stored as the pending next frame, or a negative
 * errno on an invalid size or an already pending next_skb.
 */
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
	/* reject frames outside the valid size range */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* only one pending next frame is allowed at a time */
	if (ch->next_skb) {
		printk(KERN_WARNING "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
		       __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (!test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		/* transmitter was idle - write to fifo and confirm */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		confirm_Bsend(ch);
		return 1;
	}
	/* transmitter busy - remember the frame as the next one to send */
	test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
	ch->next_skb = skb;
	return 0;
}
EXPORT_SYMBOL(bchannel_senddata);
/* Allocate a fresh receive skb on demand, sized for the requirements of
 * the current protocol. Returns the tailroom of the receive skb or a
 * negative error code.
 */
int
bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
{
	int len;

	if (bch->rx_skb) {
		len = skb_tailroom(bch->rx_skb);
		if (len >= reqlen)
			/* existing buffer is big enough - keep using it */
			return len;
		pr_warn("B%d no space for %d (only %d) bytes\n",
			bch->nr, reqlen, len);
		if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
			/* on HDLC we have to drop too big frames */
			return -EMSGSIZE;
		/* send what we have now and try a new buffer */
		recv_Bchannel(bch, 0, true);
	}
	/* update current min/max length first */
	if (unlikely(bch->maxlen != bch->next_maxlen))
		bch->maxlen = bch->next_maxlen;
	if (unlikely(bch->minlen != bch->next_minlen))
		bch->minlen = bch->next_minlen;
	if (unlikely(reqlen > bch->maxlen))
		return -EMSGSIZE;
	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
		if (reqlen >= bch->minlen) {
			len = reqlen;
		} else {
			/* round small requests up, capped at maxlen */
			len = 2 * bch->minlen;
			if (len > bch->maxlen)
				len = bch->maxlen;
		}
	} else {
		/* with HDLC we do not know the length yet */
		len = bch->maxlen;
	}
	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!bch->rx_skb) {
		pr_warn("B%d receive no memory for %d bytes\n", bch->nr, len);
		len = -ENOMEM;
	}
	return len;
}
EXPORT_SYMBOL(bchannel_get_rxbuf);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.