/*
 * NOTE(review): interior fragment of the VBI URB-completion copy path
 * (presumably cx231xx_isoc_vbi_copy — the enclosing function's prologue
 * and epilogue fall outside this chunk).  It walks the URB transfer
 * buffer and splits it into VBI lines at BT.656-style SAV/EAV sync
 * codes.  Kept byte-identical; several interior statements appear to be
 * missing (see notes below) — reconstruct from the full source.
 */
if (urb->status < 0) {
/* Log the failed URB status; -ENOENT means the URB was killed/unlinked,
 * so bail out quietly with "0 bytes copied". */
print_err_status(dev, -1, urb->status); if (urb->status == -ENOENT) return 0;
}
/* get buffer pointer and length */
p_buffer = urb->transfer_buffer;
buffer_size = urb->actual_length;
if (buffer_size > 0) {
bytes_parsed = 0;
if (dma_q->is_partial_line) { /* Handle the case where we were working on a partial
line */
/* Resume the line started in the previous URB using its saved SAV code. */
sav_eav = dma_q->last_sav;
} else { /* Check for a SAV/EAV overlapping the
buffer boundary */
/*
 * NOTE(review): the call that should produce sav_eav in this branch
 * (a boundary SAV/EAV search spanning dma_q->partial_buf and the new
 * buffer) appears to be elided from this chunk — as shown, sav_eav is
 * masked here before ever being assigned on this path.  Confirm
 * against the complete file.
 */
sav_eav &= 0xF0; /* Get the first line if we have some portion of an SAV/EAV from
the last buffer or a partial line */ if (sav_eav) {
bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
sav_eav, /* SAV/EAV */
p_buffer + bytes_parsed, /* p_buffer */
buffer_size - bytes_parsed); /* buffer size */
}
/* Now parse data that is completely in this buffer */
dma_q->is_partial_line = 0;
while (bytes_parsed < buffer_size) {
u32 bytes_used = 0;
/* Scan forward for the next SAV/EAV code; bytes_used reports how many
 * bytes the scan consumed. */
sav_eav = cx231xx_find_next_SAV_EAV(
p_buffer + bytes_parsed, /* p_buffer */
buffer_size - bytes_parsed, /* buffer size */
&bytes_used); /* bytes used to get SAV/EAV */
/* Save the last four bytes of the buffer so we can
check the buffer boundary condition next time */
/*
 * NOTE(review): the loop body between the SAV/EAV search and this
 * memcpy (the per-line copy and the bytes_parsed advance) seems to be
 * missing from this chunk; resetting bytes_parsed to 0 inside the
 * loop, as written, would never terminate — verify against the full
 * source before treating this span as authoritative.
 */
memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
bytes_parsed = 0;
}
/*
 * NOTE(review): this span is garbled — it begins what looks like
 * vbi_buf_prepare() (note the fused "staticint", missing a space) but
 * then jumps straight into a URB tear-down loop that in the upstream
 * driver belongs to cx231xx_uninit_vbi_isoc(); 'i' and 'urb' are not
 * declared in this view.  Kept byte-identical pending reconstruction
 * from the full source.
 */
/* This is called *without* dev->slock held; please keep it that way */ staticint vbi_buf_prepare(struct vb2_buffer *vb)
{ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
u32 height = 0;
u32 size;
/* nfields = -1 marks the field counter as "no field in progress". */
dev->vbi_mode.bulk_ctl.nfields = -1; for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
/* Stop each in-flight URB.  With IRQs enabled the blocking
 * usb_kill_urb() is used; otherwise fall back to the non-blocking
 * usb_unlink_urb() — presumably because usb_kill_urb() may sleep
 * (confirm against USB core docs). */
urb = dev->vbi_mode.bulk_ctl.urb[i]; if (urb) { if (!irqs_disabled())
usb_kill_urb(urb); else
usb_unlink_urb(urb);
/*
 * NOTE(review): fragment of cx231xx_init_vbi_isoc().  The middle of the
 * function — URB and transfer-buffer allocation implied by the comment
 * header and by the parameters (max_packets, num_bufs, max_pkt_size,
 * bulk_copy) — is missing between the usb_clear_halt() call and the
 * submit loop below.  Kept byte-identical.
 */
/* * Allocate URBs and start IRQ
 */ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*bulk_copy) (struct cx231xx *dev, struct urb *urb))
{ struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int rc;
dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");
/* De-allocates all pending stuff */
cx231xx_uninit_vbi_isoc(dev);
/* clear if any halt */
usb_clear_halt(dev->udev,
usb_rcvbulkpipe(dev->udev,
dev->vbi_mode.end_point_addr));
/* submit urbs and enables IRQ */ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC); if (rc) {
dev_err(dev->dev, "submit of urb %i failed (error=%i)\n", i, rc);
/* On any submit failure, unwind everything already set up and
 * propagate the error to the caller. */
cx231xx_uninit_vbi_isoc(dev); return rc;
}
}
/*
 * NOTE(review): garbled splice.  It opens with the header of
 * vbi_buffer_filled() (fused "staticinlinevoid"), but the body that
 * follows — returning bytes_to_copy when no buffer is available, then
 * calling cx231xx_do_vbi_copy() — belongs to a different VBI line-copy
 * helper in the upstream driver; bytes_to_copy and p_line are not
 * declared in this view.  Kept byte-identical pending reconstruction.
 */
/* * Announces that a buffer were filled and request the next
 */ staticinlinevoid vbi_buffer_filled(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, struct cx231xx_buffer *buf)
{ /* Advice that buffer was filled */ /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */
/* If we don't have a buffer, just return the number of bytes we would
have copied if we had a buffer. */ if (!buf) {
/* Still advance the line-tracking state so parsing stays in sync even
 * while frames are being dropped. */
dma_q->bytes_left_in_line -= bytes_to_copy;
dma_q->is_partial_line =
(dma_q->bytes_left_in_line == 0) ? 0 : 1; return bytes_to_copy;
}
/* copy the data to video buffer */
cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);
/*
 * NOTE(review): get_next_vbi_buf() — nearly complete, but truncated at
 * the end of this chunk (the closing lines after the memset, including
 * the function's closing brace, are missing).  "staticinlinevoid" is a
 * fused token from extraction.  Takes the DMA queue, derives the owning
 * device via container_of, and hands back the next queued buffer (or
 * NULL if the active list is empty).
 */
/* * generic routine to get the next available buffer
 */ staticinlinevoid get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q, struct cx231xx_buffer **buf)
{ struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode); char *outp;
if (list_empty(&dma_q->active)) {
/* Nothing queued: clear the driver's current-buffer pointer and
 * report NULL to the caller. */
dev_err(dev->dev, "No active queue to serve\n");
dev->vbi_mode.bulk_ctl.buf = NULL;
*buf = NULL; return;
}
/* Get the next buffer */
*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.