/* * Backward compatibility: replace deprecated RGB formats by their XRGB * equivalent. This selects the format older userspace applications want * while still exposing the new format.
*/ for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) { if (xrgb_formats[i][0] == pix->pixelformat) {
pix->pixelformat = xrgb_formats[i][1]; break;
}
}
/* * Retrieve format information and select the default format if the * requested format isn't supported.
*/
info = vsp1_get_format_info(video->vsp1, pix->pixelformat); if (info == NULL)
info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
/* * Adjust the colour space fields. On capture devices, userspace needs * to set the V4L2_PIX_FMT_FLAG_SET_CSC to override the defaults. Reset * all fields to *_DEFAULT if the flag isn't set, to then handle * capture and output devices in the same way.
*/ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
!(pix->flags & V4L2_PIX_FMT_FLAG_SET_CSC)) {
pix->colorspace = V4L2_COLORSPACE_DEFAULT;
pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
pix->quantization = V4L2_QUANTIZATION_DEFAULT;
}
/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
width = round_down(width, info->hsub);
height = round_down(height, info->vsub);
/* Clamp the width and height. */
pix->width = clamp(width, info->hsub, VSP1_VIDEO_MAX_WIDTH);
pix->height = clamp(height, info->vsub, VSP1_VIDEO_MAX_HEIGHT);
/* * Compute and clamp the stride and image size. While not documented in * the datasheet, strides not aligned to a multiple of 128 bytes result * in image corruption.
*/ for (i = 0; i < min(info->planes, 2U); ++i) { unsignedint hsub = i > 0 ? info->hsub : 1; unsignedint vsub = i > 0 ? info->vsub : 1; unsignedint align = 128; unsignedint bpl;
if (info->planes == 3) { /* The second and third planes must have the same stride. */
pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
}
/* * vsp1_video_complete_buffer - Complete the current buffer * @video: the video node * * This function completes the current buffer by filling its sequence number, * time stamp and payload size, and hands it back to the vb2 core. * * Return the next queued buffer or NULL if the queue is empty.
*/ staticstruct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe; struct vsp1_vb2_buffer *next = NULL; struct vsp1_vb2_buffer *done; unsignedlong flags; unsignedint i;
spin_lock_irqsave(&video->irqlock, flags);
if (list_empty(&video->irqqueue)) {
spin_unlock_irqrestore(&video->irqlock, flags); return NULL;
}
if (!list_empty(&video->irqqueue))
next = list_first_entry(&video->irqqueue, struct vsp1_vb2_buffer, queue);
spin_unlock_irqrestore(&video->irqlock, flags);
done->buf.sequence = pipe->sequence;
done->buf.vb2_buf.timestamp = ktime_get_ns(); for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
vb2_set_plane_payload(&done->buf.vb2_buf, i,
vb2_plane_size(&done->buf.vb2_buf, i));
vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
/* * If the VSP hardware isn't configured yet (which occurs either when * processing the first frame or after a system suspend/resume), add the * cached stream configuration to the display list to perform a full * initialisation.
*/ if (!pipe->configured)
vsp1_dl_list_add_body(dl, pipe->stream_config);
/* Run the first partition. */
vsp1_video_pipeline_run_partition(pipe, dl, 0);
/* Process consecutive partitions as necessary. */ for (partition = 1; partition < pipe->partitions; ++partition) { struct vsp1_dl_list *dl_next;
dl_next = vsp1_dl_list_get(pipe->output->dlm);
/* * An incomplete chain will still function, but output only * the partitions that had a dl available. The frame end * interrupt will be marked on the last dl in the chain.
*/ if (!dl_next) {
dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n"); break;
}
/* M2M Pipelines should never call here with an incomplete frame. */
WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED));
spin_lock_irqsave(&pipe->irqlock, flags);
/* Complete buffers on all video nodes. */ for (i = 0; i < vsp1->info->rpf_count; ++i) { if (!pipe->inputs[i]) continue;
vsp1_video_frame_end(pipe, pipe->inputs[i]);
}
vsp1_video_frame_end(pipe, pipe->output);
state = pipe->state;
pipe->state = VSP1_PIPELINE_STOPPED;
/* * If a stop has been requested, mark the pipeline as stopped and * return. Otherwise restart the pipeline if ready.
*/ if (state == VSP1_PIPELINE_STOPPING)
wake_up(&pipe->wq); elseif (vsp1_pipeline_ready(pipe))
vsp1_video_pipeline_run(pipe);
/* * A BRU or BRS is present in the pipeline, store its input pad * number in the input RPF for use when configuring the RPF.
*/ if (entity->type == VSP1_ENTITY_BRU ||
entity->type == VSP1_ENTITY_BRS) { /* BRU and BRS can't be chained. */ if (brx) {
ret = -EPIPE; goto out;
}
case VSP1_ENTITY_BRU: case VSP1_ENTITY_BRS:
pipe->brx = e; break;
case VSP1_ENTITY_HGO:
pipe->hgo = e; break;
case VSP1_ENTITY_HGT:
pipe->hgt = e; break;
default: break;
}
}
media_graph_walk_cleanup(&graph);
/* We need one output and at least one input. */ if (pipe->num_inputs == 0 || !pipe->output) return -EPIPE;
/* * Follow links downstream for each input and make sure the graph * contains no loop and that all branches end at the output WPF.
*/ for (i = 0; i < video->vsp1->info->rpf_count; ++i) { if (!pipe->inputs[i]) continue;
ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
pipe->output); if (ret < 0) return ret;
}
return 0;
}
/*
 * vsp1_video_pipeline_init - Initialise a pipeline for a video node
 * @pipe: the pipeline to initialise
 * @video: the video node
 *
 * Set up the generic pipeline state, install the frame completion handler
 * and build the pipeline by walking the media graph from the video node.
 *
 * Return 0 on success or a negative error code on failure. The previous
 * code was truncated and could fall off the end of this non-void function,
 * which is undefined behaviour; it now always returns a value.
 */
static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
				    struct vsp1_video *video)
{
	int ret;

	vsp1_pipeline_init(pipe);

	/* Completed frames are reported through this callback. */
	pipe->frame_end = vsp1_video_pipeline_frame_end;

	ret = vsp1_video_pipeline_build(pipe, video);
	if (ret)
		return ret;

	return 0;
}
/* * Get a pipeline object for the video node. If a pipeline has already * been allocated just increment its reference count and return it. * Otherwise allocate a new pipeline and initialize it, it will be freed * when the last reference is released.
*/ if (!video->rwpf->entity.pipe) {
pipe = kzalloc(sizeof(*pipe), GFP_KERNEL); if (!pipe) return ERR_PTR(-ENOMEM);
/* * Partitions are computed on the size before rotation, use the format * at the WPF sink.
*/
format = v4l2_subdev_state_get_format(pipe->output->entity.state,
RWPF_PAD_SINK);
div_size = format->width;
/* * Only Gen3+ hardware requires image partitioning, Gen2 will operate * with a single partition that covers the whole output.
*/ if (vsp1->info->gen >= 3) {
list_for_each_entry(entity, &pipe->entities, list_pipe) { unsignedint entity_max;
for (i = 0; i < pipe->partitions; ++i)
vsp1_pipeline_calculate_partition(pipe, &pipe->part_table[i],
div_size, i);
return 0;
}
staticint vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{ struct vsp1_entity *entity; int ret;
/* Determine this pipelines sizes for image partitioning support. */
ret = vsp1_video_pipeline_setup_partitions(pipe); if (ret < 0) return ret;
if (pipe->uds) { struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
/* * If a BRU or BRS is present in the pipeline before the UDS, * the alpha component doesn't need to be scaled as the BRU and * BRS output alpha value is fixed to 255. Otherwise we need to * scale the alpha component only when available at the input * RPF.
*/ if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
pipe->uds_input->type == VSP1_ENTITY_BRS) {
uds->scale_alpha = false;
} else { struct vsp1_rwpf *rpf =
to_rwpf(&pipe->uds_input->subdev);
uds->scale_alpha = rpf->fmtinfo->alpha;
}
}
/* * Compute and cache the stream configuration into a body. The cached * body will be added to the display list by vsp1_video_pipeline_run() * whenever the pipeline needs to be fully reconfigured.
*/
pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm); if (!pipe->stream_config) return -ENOMEM;
mutex_lock(&pipe->lock); if (pipe->stream_count == pipe->num_inputs) {
ret = vsp1_video_setup_pipeline(pipe); if (ret < 0) {
vsp1_video_release_buffers(video);
vsp1_video_cleanup_pipeline(pipe);
mutex_unlock(&pipe->lock); return ret;
}
start_pipeline = true;
}
pipe->stream_count++;
mutex_unlock(&pipe->lock);
/* * vsp1_pipeline_ready() is not sufficient to establish that all streams * are prepared and the pipeline is configured, as multiple streams * can race through streamon with buffers already queued; Therefore we * don't even attempt to start the pipeline until the last stream has * called through here.
*/ if (!start_pipeline) return 0;
spin_lock_irqsave(&pipe->irqlock, flags); if (vsp1_pipeline_ready(pipe))
vsp1_video_pipeline_run(pipe);
spin_unlock_irqrestore(&pipe->irqlock, flags);
/* * Clear the buffers ready flag to make sure the device won't be started * by a QBUF on the video node on the other side of the pipeline.
*/
spin_lock_irqsave(&video->irqlock, flags);
pipe->buffers_ready &= ~(1 << video->pipe_index);
spin_unlock_irqrestore(&video->irqlock, flags);
mutex_lock(&pipe->lock); if (--pipe->stream_count == pipe->num_inputs) { /* Stop the pipeline. */
ret = vsp1_pipeline_stop(pipe); if (ret == -ETIMEDOUT)
dev_err(video->vsp1->dev, "pipeline stop timeout\n");
if (vb2_queue_is_busy(&video->queue, file)) return -EBUSY;
/* * Get a pipeline for the video node and start streaming on it. No link * touching an entity in the pipeline can be activated or deactivated * once streaming is started.
*/
mutex_lock(&mdev->graph_mutex);
pipe = vsp1_video_pipeline_get(video); if (IS_ERR(pipe)) {
mutex_unlock(&mdev->graph_mutex); return PTR_ERR(pipe);
}
ret = __video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) {
mutex_unlock(&mdev->graph_mutex); goto err_pipe;
}
mutex_unlock(&mdev->graph_mutex);
/* * Verify that the configured format matches the output of the connected * subdev.
*/
ret = vsp1_video_verify_format(video); if (ret < 0) goto err_stop;
/* Start the queue. */
ret = vb2_streamon(&video->queue, type); if (ret < 0) goto err_stop;
/* ----------------------------------------------------------------------------- * Media entity operations
*/
/*
 * vsp1_video_link_validate - Validate a media link at stream start time
 * @link: the media link (unused)
 *
 * Ideally, link validation should be implemented here instead of calling
 * vsp1_video_verify_format() in vsp1_video_streamon() manually. That would
 * however break userspace that starts one video device before configuring
 * formats on other video devices in the pipeline. This operation is just a
 * no-op to silence the warnings from v4l2_subdev_link_validate().
 *
 * Note: the previous declaration read "staticint", a fused token that does
 * not compile; it is now the correct "static int".
 */
static int vsp1_video_link_validate(struct media_link *link)
{
	return 0;
}
/* * To avoid increasing the system suspend time needlessly, loop over the * pipelines twice, first to set them all to the stopping state, and * then to wait for the stop to complete.
*/ for (i = 0; i < vsp1->info->wpf_count; ++i) { struct vsp1_rwpf *wpf = vsp1->wpf[i]; struct vsp1_pipeline *pipe;
if (wpf == NULL) continue;
pipe = wpf->entity.pipe; if (pipe == NULL) continue;
/* ... and register the video device. */
video->video.queue = &video->queue;
ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1); if (ret < 0) {
dev_err(video->vsp1->dev, "failed to register video device\n"); goto error;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.