/*
 * NOTE(review): fragment of the bulk-mode payload encoder. The text is
 * garbled ("staticvoid" should read "static void") and the body is
 * truncated after the uvc_video_encode_data() call — the payload-size
 * bookkeeping, req->length assignment and end-of-frame handling are not
 * visible here. Restore from the canonical source rather than patching
 * this fragment in place.
 */
staticvoid
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, struct uvc_buffer *buf)
{ void *mem = req->buf; struct uvc_request *ureq = req->context; int len = video->req_size; int ret;
/* Add a header at the beginning of the payload. */ if (video->payload_size == 0) {
ret = uvc_video_encode_header(video, buf, mem, len);
video->payload_size += ret;
mem += ret;
len -= ret;
}
/* Process video data: never encode past the remaining payload budget. */
len = min_t(int, video->max_payload_size - video->payload_size, len);
ret = uvc_video_encode_data(video, buf, mem, len);
/*
 * Free a uvc_request and every resource hanging off it.
 *
 * Callers must take care to hold req_lock when this function may be called
 * from multiple threads. For example, when frames are streaming to the host.
 */
static void
uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
{
	/* Release the scatter-gather table backing this request. */
	sg_free_table(&ureq->sgt);

	/*
	 * Return the usb_request to the endpoint. Both may already be gone
	 * (e.g. on the teardown path), hence the guard.
	 */
	if (ureq->req && ep) {
		usb_ep_free_request(ep, ureq->req);
		ureq->req = NULL;
	}

	/* kfree(NULL) is a no-op, so no guard is needed for the buffer. */
	kfree(ureq->req_buffer);
	ureq->req_buffer = NULL;

	/*
	 * Detach from the owning request list if still linked;
	 * list_del_init() leaves the node self-consistent either way.
	 */
	if (!list_empty(&ureq->list))
		list_del_init(&ureq->list);

	kfree(ureq);
}
/*
 * NOTE(review): truncated fragment — the trailing "return ret;" and the
 * function's closing brace are missing from this view, and "staticint"
 * is a fused token ("static int"). Queues a request on the streaming
 * endpoint; on failure, halts bulk endpoints (isoc endpoints can't be
 * halted).
 */
staticint uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{ int ret;
ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); if (ret < 0) {
uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
ret);
/* If the endpoint is disabled the descriptor may be NULL. */ if (video->ep->desc) { /* Isochronous endpoints can't be halted. */ if (usb_endpoint_xfer_bulk(video->ep->desc))
usb_ep_set_halt(video->ep);
}
}
/*
 * Queue a usb_request either directly to the endpoint or onto one of the
 * video's internal request lists.
 *
 * @video:       the video stream the request belongs to
 * @req:         the request to dispatch
 * @queue_to_ep: when true, hand the request to the endpoint; when false,
 *               park it on req_ready (isoc) or req_free (bulk)
 *
 * Returns 0 on success, -ENODEV if the stream is disabled, or the
 * uvcg_video_ep_queue() error code.
 *
 * This function must be called with video->req_lock held.
 */
static int
uvcg_video_usb_req_queue(struct uvc_video *video, struct usb_request *req,
			 bool queue_to_ep)
{
	/* max_payload_size is only set when using bulk transfer. */
	bool is_bulk = video->max_payload_size;
	struct list_head *list = NULL;

	if (!video->is_enabled)
		return -ENODEV;

	if (queue_to_ep) {
		struct uvc_request *ureq = req->context;

		/*
		 * With USB3 handling more requests at a higher speed, we can't
		 * afford to generate an interrupt for every request. Decide to
		 * interrupt:
		 *
		 * - When no more requests are available in the free queue, as
		 *   this may be our last chance to refill the endpoint's
		 *   request queue.
		 *
		 * - When this request is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
		if (list_empty(&video->req_free) || ureq->last_buf ||
		    !(video->req_int_count %
		      min(DIV_ROUND_UP(video->uvc_num_requests, 4),
			  UVCG_REQ_MAX_INT_COUNT))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}
		video->req_int_count++;
		return uvcg_video_ep_queue(video, req);
	}

	/*
	 * If we're not queuing to the ep, for isoc we're queuing
	 * to the req_ready list, otherwise req_free.
	 */
	list = is_bulk ? &video->req_free : &video->req_ready;
	list_add_tail(&req->list, list);

	return 0;
}
/*
 * NOTE(review): fragment starting mid-function — the enclosing signature
 * is not visible, and the "case" labels below lack their opening
 * "switch (req->status)". From the calls made (uvcg_queue_cancel,
 * uvcg_complete_buffer, requeue to req_free) this appears to be the
 * usb_request completion handler — confirm against the canonical source.
 */
spin_lock_irqsave(&video->req_lock, flags);
atomic_dec(&video->queued); if (!video->is_enabled) { /* * When is_enabled is false, uvcg_video_disable() ensures * that in-flight uvc_buffers are returned, so we can * safely call free_request without worrying about * last_buf.
*/
uvc_video_free_request(ureq, ep);
spin_unlock_irqrestore(&video->req_lock, flags); return;
}
case -EXDEV:
uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n"); if (req->length != 0)
queue->flags |= UVC_QUEUE_DROP_INCOMPLETE; break;
case -ESHUTDOWN: /* disconnect from host. */
uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1); break;
default:
uvcg_warn(&video->uvc->func, "VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
}
/* Complete the buffer that finished with this request, if any. */
if (last_buf) {
spin_lock_irqsave(&queue->irqlock, flags);
uvcg_complete_buffer(queue, last_buf);
spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags); /* * Video stream might have been disabled while we were * processing the current usb_request. So make sure * we're still streaming before queueing the usb_request * back to req_free
*/ if (!video->is_enabled) {
uvc_video_free_request(ureq, ep);
spin_unlock_irqrestore(&video->req_lock, flags);
uvcg_queue_cancel(queue, 0);
return;
}
list_add_tail(&req->list, &video->req_free); /* * Queue work to the wq as well since it is possible that a * buffer may not have been completely encoded with the set of * in-flight usb requests for which the complete callbacks are * firing. * In that case, if we do not queue work to the worker thread, * the buffer will never be marked as complete - and therefore * not be returned to userspace. As a result, * dequeue -> queue -> dequeue flow of uvc buffers will not * happen. Since there is a new free request, wake up the pump.
*/
queue_work(video->async_wq, &video->pump);
/*
 * NOTE(review): fragment with neither its function header nor its loop
 * tail visible. It drains video->req_ready and otherwise falls back to
 * the free list, bounded by UVCG_REQ_MAX_ZERO_COUNT queued requests —
 * presumably the isoc zero-length-request submit path; confirm against
 * the canonical source before modifying.
 */
while (true) { if (!video->ep->enabled) return;
spin_lock_irqsave(&video->req_lock, flags); /* * Here we check whether any request is available in the ready * list. If it is, queue it to the ep and add the current * usb_request to the req_free list - for video_pump to fill in. * Otherwise, just use the current usb_request to queue a 0 * length request to the ep. Since we always add to the req_free * list if we dequeue from the ready list, there will never * be a situation where the req_free list is completely out of * requests and cannot recover.
*/ if (!list_empty(&video->req_ready)) {
req = list_first_entry(&video->req_ready, struct usb_request, list);
} else { if (list_empty(&video->req_free) ||
(atomic_read(&video->queued) > UVCG_REQ_MAX_ZERO_COUNT)) {
spin_unlock_irqrestore(&video->req_lock, flags);
/* * Queue to the endpoint. The actual queueing to ep will * only happen on one thread - the async_wq for bulk endpoints * and this thread for isoc endpoints.
*/
ret = uvcg_video_usb_req_queue(video, req, !is_bulk); if (ret < 0) { /* * Endpoint error, but the stream is still enabled. * Put request back in req_free for it to be cleaned * up later.
*/
list_add_tail(&req->list, &video->req_free); /* * There is a new free request - wake up the pump.
*/
queue_work(video->async_wq, &video->pump);
/*
 * NOTE(review): tail fragment of a request-sizing routine (its signature
 * and the computation of req_size/max_req_size/nreq are not visible).
 * Clamps the request size to the configured isoc bandwidth and pads the
 * request count with UVCG_REQ_MAX_ZERO_COUNT.
 */
if (req_size > max_req_size) { /* The prepared interval length and expected buffer size * is not possible to stream with the currently configured * isoc bandwidth. Fallback to the maximum.
*/
req_size = max_req_size;
}
video->req_size = req_size;
/* We need to compensate the amount of requests to be * allocated with the maximum amount of zero length requests. * Since it is possible that hw_submit will initially * enqueue some zero length requests and we then will not be * able to fully encode one frame.
*/
video->uvc_num_requests = nreq + UVCG_REQ_MAX_ZERO_COUNT;
video->reqs_per_frame = nreq;
}
/*
 * NOTE(review): only the signature and local declarations of
 * uvc_video_alloc_requests() survive here — the allocation loop is
 * missing from this view ("staticint"/"unsignedint" are fused tokens).
 */
staticint
uvc_video_alloc_requests(struct uvc_video *video)
{ struct uvc_request *ureq; unsignedint i; int ret = -ENOMEM;
/* -------------------------------------------------------------------------- * Video streaming
*/
/* * uvcg_video_pump - Pump video data into the USB requests * * This function fills the available USB requests (listed in req_free) with * video data from the queued buffers.
*/ staticvoid uvcg_video_pump(struct work_struct *work)
{ struct uvc_video *video = container_of(work, struct uvc_video, pump); struct uvc_video_queue *queue = &video->queue; /* video->max_payload_size is only set when using bulk transfer */ bool is_bulk = video->max_payload_size; struct usb_request *req = NULL; struct uvc_buffer *buf; unsignedlong flags; int ret = 0;
/*
 * NOTE(review): "staticvoid"/"unsignedlong" above are fused tokens, and
 * the function is truncated — everything after the req_queue call below
 * (error handling and the loop/function close) is missing from this view.
 */
while (true) { if (!video->ep->enabled) return;
/* * Check is_enabled and retrieve the first available USB * request, protected by the request lock.
*/
spin_lock_irqsave(&video->req_lock, flags); if (!video->is_enabled || list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags); return;
}
req = list_first_entry(&video->req_free, struct usb_request,
list);
list_del(&req->list);
spin_unlock_irqrestore(&video->req_lock, flags);
/* * Retrieve the first available video buffer and fill the * request, protected by the video queue irqlock.
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue); if (!buf) { /* * Either the queue has been disconnected or no video buffer * available for bulk transfer. Either way, stop processing * further.
*/
spin_unlock_irqrestore(&queue->irqlock, flags); break;
}
video->encode(req, video, buf);
spin_unlock_irqrestore(&queue->irqlock, flags);
spin_lock_irqsave(&video->req_lock, flags); /* For bulk end points we queue from the worker thread * since we would preferably not want to wait on requests * to be ready, in the uvcg_video_complete() handler. * For isoc endpoints we add the request to the ready list * and only queue it to the endpoint from the complete handler.
*/
ret = uvcg_video_usb_req_queue(video, req, is_bulk);
spin_unlock_irqrestore(&video->req_lock, flags);
/*
 * NOTE(review): fragment starting mid-function — the enclosing signature
 * and the initial teardown steps are not visible. Given it strips
 * last_buf from in-flight uvc_requests, unlinks the ureqs list, returns
 * buffered frames and disables the queue, this looks like the tail of
 * uvcg_video_disable() — confirm against the canonical source.
 */
/* * Remove any in-flight buffers from the uvc_requests * because we want to return them before cancelling the * queue. This ensures that we aren't stuck waiting for * all complete callbacks to come through before disabling * vb2 queue.
*/
list_for_each_entry(ureq, &video->ureqs, list) { if (ureq->last_buf) {
list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
ureq->last_buf = NULL;
}
}
spin_unlock_irqrestore(&video->req_lock, flags);
spin_lock_irqsave(&video->req_lock, flags); /* * Remove all uvc_requests from ureqs with list_del_init * This lets uvc_video_free_request correctly identify * if the uvc_request is attached to a list or not when freeing * memory.
*/
list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
list_del_init(&ureq->list);
/* * Return all the video buffers before disabling the queue.
*/
spin_lock_irqsave(&video->queue.irqlock, flags);
list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
list_del(&buf->queue);
uvcg_complete_buffer(&video->queue, buf);
}
spin_unlock_irqrestore(&video->queue.irqlock, flags);
uvcg_queue_enable(&video->queue, 0); return 0;
}
/* * Enable the video stream.
*/ int uvcg_video_enable(struct uvc_video *video)
{ int ret;
/*
 * NOTE(review): truncated — the body ends after the request allocation;
 * whatever follows (encode-op selection, stream start) is missing from
 * this view.
 */
if (video->ep == NULL) {
uvcg_info(&video->uvc->func, "Video enable failed, device is uninitialized.\n"); return -ENODEV;
}
/* * Safe to access request related fields without req_lock because * this is the only thread currently active, and no other * request handling thread will become active until this function * returns.
*/
video->is_enabled = true;
if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0) return ret;
if ((ret = uvc_video_alloc_requests(video)) < 0) return ret;
/* * Initialize the UVC video stream.
*/ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
/*
 * NOTE(review): truncated — the tail of the function (and its return 0)
 * is missing from this view. Sets up the stream's lists, lock, worker
 * infrastructure (workqueue for the pump, kworker for hw submit).
 */
video->is_enabled = false;
INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
INIT_LIST_HEAD(&video->req_ready);
spin_lock_init(&video->req_lock);
INIT_WORK(&video->pump, uvcg_video_pump);
/* Allocate a work queue for asynchronous video pump handler. */
video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!video->async_wq) return -EINVAL;
/* Allocate a kthread for asynchronous hw submit handler. */
video->kworker = kthread_run_worker(0, "UVCG"); if (IS_ERR(video->kworker)) {
uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n"); return PTR_ERR(video->kworker);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.