/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'. If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'. If 'steal' != NULL, then buffers may also taken from that queue if needed, but only if 'from' is the free queue.
The buffer is automatically cleared if it goes to the free queue. It is also cleared if buffers need to be taken from the 'steal' queue and the 'from' queue is the free queue.
When 'from' is q_free, then needed_bytes is compared to the total available buffer length, otherwise needed_bytes is compared to the bytesused value. For the 'steal' queue the total available buffer length is always used.
-ENOMEM is returned if the buffers could not be obtained, 0 if all buffers where obtained from the 'from' list and if non-zero then
the number of stolen buffers is returned. */ int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal, struct ivtv_queue *to, int needed_bytes)
{ unsignedlong flags; int rc = 0; int from_free = from == &s->q_free; int to_free = to == &s->q_free; int bytes_available, bytes_steal;
/* move buffers from the tail of the 'steal' queue to the tail of the 'from' queue. Always copy all the buffers with the same dma_xfer_cnt value, this ensures that you do not end up with partial frame data
if one frame is stored in multiple buffers. */ while (dma_xfer_cnt == buf->dma_xfer_cnt) {
list_move_tail(steal->list.prev, &from->list);
rc++;
steal->buffers--;
steal->length -= s->buf_size;
steal->bytesused -= buf->bytesused - buf->readpos;
buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
from->buffers++;
from->length += s->buf_size;
bytes_available += s->buf_size; if (list_empty(&steal->list)) break;
buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
}
} if (from_free) {
u32 old_length = to->length;
while (to->length - old_length < needed_bytes) {
ivtv_queue_move_buf(s, from, to, 1);
}
} else {
u32 old_bytesused = to->bytesused;
while (to->bytesused - old_bytesused < needed_bytes) {
ivtv_queue_move_buf(s, from, to, to_free);
}
}
spin_unlock_irqrestore(&s->qlock, flags); return rc;
}
/* Tail of the stream buffer allocation routine: allocates 's->buffers'
 * buffer descriptors plus their data areas and places each on the free
 * queue; on partial failure everything allocated so far is released and
 * -ENOMEM is returned.
 * NOTE(review): the enclosing function's signature and the declarations of
 * 'i' and 's' are not visible in this chunk — presumably this is the body
 * of ivtv_stream_alloc(); confirm against the full file. */
/* allocate stream buffers. Initially all buffers are in q_free. */ for (i = 0; i < s->buffers; i++) { struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
GFP_KERNEL|__GFP_NOWARN);
if (buf == NULL) break;
/* data area: buf_size plus 256 spare bytes */
buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN); if (buf->buf == NULL) {
kfree(buf); break;
}
/* map for DMA when the stream may use it, then sync the mapping for CPU
 * access until a transfer is actually started */
INIT_LIST_HEAD(&buf->list); if (ivtv_might_use_dma(s)) {
buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
buf->buf, s->buf_size + 256, s->dma);
ivtv_buf_sync_for_cpu(s, buf);
}
ivtv_enqueue(s, buf, &s->q_free);
/* loop ran to completion => every buffer was allocated */
} if (i == s->buffers) return 0;
/* partial allocation: free everything and report failure */
IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
ivtv_stream_free(s); return -ENOMEM;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.