// SPDX-License-Identifier: GPL-2.0-only /* * BCM283x / BCM271x Unicam Capture Driver * * Copyright (C) 2017-2020 - Raspberry Pi (Trading) Ltd. * Copyright (C) 2024 - Ideas on Board * * Dave Stevenson <dave.stevenson@raspberrypi.com> * * Based on TI am437x driver by * Benoit Parrot <bparrot@ti.com> * Lad, Prabhakar <prabhakar.csengg@gmail.com> * * and TI CAL camera interface driver by * Benoit Parrot <bparrot@ti.com> * * * There are two camera drivers in the kernel for BCM283x - this one and * bcm2835-camera (currently in staging). * * This driver directly controls the Unicam peripheral - there is no * involvement with the VideoCore firmware. Unicam receives CSI-2 or CCP2 data * and writes it into SDRAM. The only potential processing options are to * repack Bayer data into an alternate format, and applying windowing. The * repacking does not shift the data, so can repack V4L2_PIX_FMT_Sxxxx10P to * V4L2_PIX_FMT_Sxxxx10, or V4L2_PIX_FMT_Sxxxx12P to V4L2_PIX_FMT_Sxxxx12, but * not generically up to V4L2_PIX_FMT_Sxxxx16. Support for windowing may be * added later. * * It should be possible to connect this driver to any sensor with a suitable * output interface and V4L2 subdevice driver.
*/
/*
 * Unicam must request a minimum of 250MHz from the VPU clock.
 * Otherwise the input FIFOs overrun and cause image corruption.
 */
#define UNICAM_MIN_VPU_CLOCK_RATE	(250 * 1000 * 1000)

/* Unicam has an internal DMA alignment constraint of 16 bytes for each line. */
#define UNICAM_DMA_BPL_ALIGNMENT	16

/*
 * The image stride is stored in a 16 bit register, and needs to be aligned to
 * the DMA constraint. As the ISP in the same SoC has a 32 bytes alignment
 * constraint on its input, set the image stride alignment to 32 bytes here as
 * well to avoid incompatible configurations.
 */
#define UNICAM_IMAGE_BPL_ALIGNMENT	32
#define UNICAM_IMAGE_MAX_BPL		((1U << 16) - UNICAM_IMAGE_BPL_ALIGNMENT)

/*
 * Max width is therefore determined by the max stride divided by the number of
 * bits per pixel. Take 32bpp as a worst case. No imposed limit on the height,
 * so adopt a square image for want of anything better.
 */
#define UNICAM_IMAGE_MIN_WIDTH		16
#define UNICAM_IMAGE_MIN_HEIGHT		16
#define UNICAM_IMAGE_MAX_WIDTH		(UNICAM_IMAGE_MAX_BPL / 4)
#define UNICAM_IMAGE_MAX_HEIGHT		UNICAM_IMAGE_MAX_WIDTH

/*
 * There's no intrinsic limits on the width and height for embedded data. Use
 * the same maximum values as for the image, to avoid overflows in the image
 * size computation.
 */
#define UNICAM_META_MIN_WIDTH		1
#define UNICAM_META_MIN_HEIGHT		1
#define UNICAM_META_MAX_WIDTH		UNICAM_IMAGE_MAX_WIDTH
#define UNICAM_META_MAX_HEIGHT		UNICAM_IMAGE_MAX_HEIGHT

/*
 * Size of the dummy buffer. Can be any size really, but the DMA
 * allocation works in units of page sizes.
 */
#define UNICAM_DUMMY_BUF_SIZE		PAGE_SIZE
/*
 * struct unicam_format_info - Unicam media bus format information
 * @fourcc: V4L2 pixel format FCC identifier. 0 if n/a.
 * @unpacked_fourcc: V4L2 pixel format FCC identifier if the data is expanded
 *	out to 16bpp. 0 if n/a.
 * @code: V4L2 media bus format code.
 * @depth: Bits per pixel as delivered from the source.
 * @csi_dt: CSI data type.
 * @unpack: PUM value when unpacking to @unpacked_fourcc
 */
struct unicam_format_info {
	u32 fourcc;
	u32 unpacked_fourcc;
	u32 code;
	u8 depth;
	u8 csi_dt;
	u8 unpack;
};
/* Pointer to the current v4l2_buffer */ struct unicam_buffer *cur_frm; /* Pointer to the next v4l2_buffer */ struct unicam_buffer *next_frm; /* Used to store current pixel format */ struct v4l2_format fmt; /* Buffer queue used in video-buf */ struct vb2_queue buffer_queue; /* Queue of filled frames */ struct list_head dma_queue; /* IRQ lock for DMA queue */
spinlock_t dma_queue_lock; /* Identifies video device for this channel */ struct video_device video_dev; /* Pointer to the parent handle */ struct unicam_device *dev; struct media_pad pad; /* * Dummy buffer intended to be used by unicam * if we have no other queued buffers to swap to.
*/ struct unicam_buffer dummy_buf; void *dummy_buf_cpu_addr;
};
/*
 * Program the DMA buffer address range for @buf into the hardware.
 * NOTE(review): the remainder of this function (the register writes) is not
 * visible in this chunk; only the visible portion is corrected here.
 */
static void unicam_wr_dma_addr(struct unicam_node *node,
			       struct unicam_buffer *buf)
{
	/*
	 * Due to a HW bug causing buffer overruns in circular buffer mode
	 * under certain (not yet fully known) conditions, the dummy buffer
	 * allocation is set to a single page size, but the hardware gets
	 * programmed with a buffer size of 0.
	 */
	dma_addr_t endaddr = buf->dma_addr +
			     (buf != &node->dummy_buf ? buf->size : 0);
if (!(sta & (UNICAM_IS | UNICAM_PI0))) return IRQ_HANDLED;
/* * Look for either the Frame End interrupt or the Packet Capture status * to signal a frame end.
*/
fe = ista & UNICAM_FEI || sta & UNICAM_PI0;
/* * We must run the frame end handler first. If we have a valid next_frm * and we get a simultaneout FE + FS interrupt, running the FS handler * first would null out the next_frm ptr and we would have lost the * buffer forever.
*/ if (fe) { bool inc_seq = unicam->frame_started;
/* * Ensure we have swapped buffers already as we can't * stop the peripheral. If no buffer is available, use a * dummy buffer to dump out frames until we get a new buffer * to use.
*/ for (i = 0; i < ARRAY_SIZE(unicam->node); i++) { struct unicam_node *node = &unicam->node[i];
if (!vb2_start_streaming_called(&node->buffer_queue)) continue;
/* * If cur_frm == next_frm, it means we have not had * a chance to swap buffers, likely due to having * multiple interrupts occurring simultaneously (like FE * + FS + LS). In this case, we cannot signal the buffer * as complete, as the HW will reuse that buffer.
*/ if (node->cur_frm && node->cur_frm != node->next_frm) {
unicam_process_buffer_complete(node, sequence);
inc_seq = true;
}
node->cur_frm = node->next_frm;
}
/* * Increment the sequence number conditionally on either a FS * having already occurred, or in the FE + FS condition as * caught in the FE handler above. This ensures the sequence * number corresponds to the frames generated by the sensor, not * the frames dequeued to userland.
*/ if (inc_seq) {
unicam->sequence++;
unicam->frame_started = false;
}
}
if (ista & UNICAM_FSI) { /* * Timestamp is to be when the first data byte was captured, * aka frame start.
*/
ts = ktime_get_ns(); for (i = 0; i < ARRAY_SIZE(unicam->node); i++) { struct unicam_node *node = &unicam->node[i];
if (!vb2_start_streaming_called(&node->buffer_queue)) continue;
if (node->cur_frm)
node->cur_frm->vb.vb2_buf.timestamp = ts; else
dev_dbg(unicam->v4l2_dev.dev, "ISR: [%d] Dropping frame, buffer not available at FS\n",
i); /* * Set the next frame output to go to a dummy frame * if we have not managed to obtain another frame * from the queue.
*/
unicam_schedule_dummy_buffer(node);
}
/* * Cannot swap buffer at frame end, there may be a race condition * where the HW does not actually swap it if the new frame has * already started.
*/ if (ista & (UNICAM_FSI | UNICAM_LCI) && !fe) { for (i = 0; i < ARRAY_SIZE(unicam->node); i++) { struct unicam_node *node = &unicam->node[i];
if (!vb2_start_streaming_called(&node->buffer_queue)) continue;
spin_lock(&node->dma_queue_lock); if (!list_empty(&node->dma_queue) && !node->next_frm)
unicam_schedule_next_buffer(node);
spin_unlock(&node->dma_queue_lock);
}
}
/* * Enable lane clocks. The register is structured as follows: * * [9:8] - DAT3 * [7:6] - DAT2 * [5:4] - DAT1 * [3:2] - DAT0 * [1:0] - CLK * * Enabled lane must be set to b01, and disabled lanes to b00. The clock * lane is always enabled.
*/
val = 0x155 & GENMASK(unicam->pipe.num_data_lanes * 2 + 1, 0);
unicam_clk_write(unicam, val);
/* Enable clock lane and set up terminations */
val = 0; if (unicam->bus_type == V4L2_MBUS_CSI2_DPHY) { /* CSI2 */
unicam_set_field(&val, 1, UNICAM_CLE);
unicam_set_field(&val, 1, UNICAM_CLLPE); if (!(unicam->bus_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK)) {
unicam_set_field(&val, 1, UNICAM_CLTRE);
unicam_set_field(&val, 1, UNICAM_CLHSE);
}
} else { /* CCP2 */
unicam_set_field(&val, 1, UNICAM_CLE);
unicam_set_field(&val, 1, UNICAM_CLHSE);
unicam_set_field(&val, 1, UNICAM_CLTRE);
}
unicam_reg_write(unicam, UNICAM_CLK, val);
/* * Enable required data lanes with appropriate terminations. * The same value needs to be written to UNICAM_DATn registers for * the active lanes, and 0 for inactive ones.
*/
val = 0; if (unicam->bus_type == V4L2_MBUS_CSI2_DPHY) { /* CSI2 */
unicam_set_field(&val, 1, UNICAM_DLE);
unicam_set_field(&val, 1, UNICAM_DLLPE); if (!(unicam->bus_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK)) {
unicam_set_field(&val, 1, UNICAM_DLTRE);
unicam_set_field(&val, 1, UNICAM_DLHSE);
}
} else { /* CCP2 */
unicam_set_field(&val, 1, UNICAM_DLE);
unicam_set_field(&val, 1, UNICAM_DLHSE);
unicam_set_field(&val, 1, UNICAM_DLTRE);
}
unicam_reg_write(unicam, UNICAM_DAT0, val);
if (unicam->pipe.num_data_lanes == 1)
val = 0;
unicam_reg_write(unicam, UNICAM_DAT1, val);
if (unicam->max_data_lanes > 2) { /* * Registers UNICAM_DAT2 and UNICAM_DAT3 only valid if the * instance supports more than 2 data lanes.
*/ if (unicam->pipe.num_data_lanes == 2)
val = 0;
unicam_reg_write(unicam, UNICAM_DAT2, val);
if (unicam->pipe.num_data_lanes == 3)
val = 0;
unicam_reg_write(unicam, UNICAM_DAT3, val);
}
ret = unicam_get_image_vc_dt(unicam, state, &vc, &dt); if (ret) { /* * If the source doesn't support frame descriptors, default to * VC 0 and use the DT corresponding to the format.
*/
vc = 0;
dt = fmtinfo->csi_dt;
}
/* * Enable trigger only for the first frame to * sync correctly to the FS from the source.
*/
unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
}
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
unicam->subdev.enabled_streams) return -EBUSY;
/* No transcoding, source and sink formats must match. */ if (unicam_sd_pad_is_source(format->pad)) return v4l2_subdev_get_fmt(sd, state, format);
/* * Allowed formats for the stream on the sink pad depend on what source * pad the stream is routed to. Find the corresponding source pad and * use it to validate the media bus code.
*/
ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
format->pad, format->stream,
&source_pad, &source_stream); if (ret) return ret;
ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
&other_pad, &other_stream); if (ret) return ret;
ret = v4l2_subdev_enable_streams(unicam->sensor.subdev,
unicam->sensor.pad->index,
BIT(other_stream)); if (ret) {
dev_err(unicam->dev, "stream on failed in subdev\n"); return ret;
}
ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(unicam->subdev.pads),
unicam->subdev.pads); if (ret) {
dev_err(unicam->dev, "Failed to initialize media entity: %d\n",
ret); return ret;
}
ret = v4l2_subdev_init_finalize(sd); if (ret) {
dev_err(unicam->dev, "Failed to initialize subdev: %d\n", ret); goto err_entity;
}
ret = v4l2_device_register_subdev(&unicam->v4l2_dev, sd); if (ret) {
dev_err(unicam->dev, "Failed to register subdev: %d\n", ret); goto err_subdev;
}
if (vb2_plane_size(vb, 0) < size) {
dev_dbg(node->dev->dev, "data will not fit into plane (%lu < %u)\n",
vb2_plane_size(vb, 0), size); return -EINVAL;
}
/* * Start the pipeline. This validates all links, and populates the * pipeline structure.
*/
ret = video_device_pipeline_start(&node->video_dev, &unicam->pipe.pipe); if (ret < 0) {
dev_dbg(unicam->dev, "Failed to start media pipeline: %d\n", ret); goto err_buffers;
}
/* * Determine which video nodes are included in the pipeline, and get the * number of data lanes.
*/ if (unicam->pipe.pipe.start_count == 1) {
unicam->pipe.nodes = 0;
media_pipeline_for_each_pad(&unicam->pipe.pipe, &iter, pad) { if (pad->entity != &unicam->subdev.sd.entity) continue;
if (!(unicam->pipe.nodes & BIT(UNICAM_IMAGE_NODE))) {
dev_dbg(unicam->dev, "Pipeline does not include image node\n");
ret = -EPIPE; goto err_pipeline;
}
ret = unicam_num_data_lanes(unicam); if (ret < 0) goto err_pipeline;
unicam->pipe.num_data_lanes = ret;
dev_dbg(unicam->dev, "Running with %u data lanes, nodes %u\n",
unicam->pipe.num_data_lanes, unicam->pipe.nodes);
}
/* Arm the node with the first buffer from the DMA queue. */
spin_lock_irqsave(&node->dma_queue_lock, flags);
buf = list_first_entry(&node->dma_queue, struct unicam_buffer, list);
node->cur_frm = buf;
node->next_frm = buf;
list_del(&buf->list);
spin_unlock_irqrestore(&node->dma_queue_lock, flags);
/* * Wait for all the video devices in the pipeline to have been started * before starting the hardware. In the general case, this would * prevent capturing multiple streams independently. However, the * Unicam DMA engines are not generic, they have been designed to * capture image data and embedded data from the same camera sensor. * Not only does the main use case not benefit from independent * capture, it requires proper synchronization of the streams at start * time.
*/ if (unicam->pipe.pipe.start_count < hweight32(unicam->pipe.nodes)) return 0;
ret = pm_runtime_resume_and_get(unicam->dev); if (ret < 0) {
dev_err(unicam->dev, "PM runtime resume failed: %d\n", ret); goto err_pipeline;
}
/* Enable the streams on the source. */
ret = v4l2_subdev_enable_streams(&unicam->subdev.sd,
UNICAM_SD_PAD_SOURCE_IMAGE,
BIT(0)); if (ret < 0) {
dev_err(unicam->dev, "stream on failed in subdev\n"); goto err_pm_put;
}
if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE)) {
ret = v4l2_subdev_enable_streams(&unicam->subdev.sd,
UNICAM_SD_PAD_SOURCE_METADATA,
BIT(0)); if (ret < 0) {
dev_err(unicam->dev, "stream on failed in subdev\n"); goto err_disable_streams;
}
}
/* Stop the hardware when the first video device gets stopped. */ if (unicam->pipe.pipe.start_count == hweight32(unicam->pipe.nodes)) { if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE))
v4l2_subdev_disable_streams(&unicam->subdev.sd,
UNICAM_SD_PAD_SOURCE_METADATA,
BIT(0));
/* * Default to the first format if the requested pixel format code isn't * supported.
*/
fmtinfo = unicam_find_format_by_fourcc(pix->pixelformat,
UNICAM_SD_PAD_SOURCE_IMAGE); if (!fmtinfo) {
fmtinfo = &unicam_image_formats[0];
pix->pixelformat = fmtinfo->fourcc;
}
/* * Default to the first format if the requested pixel format code isn't * supported.
*/
fmtinfo = unicam_find_format_by_fourcc(meta->dataformat,
UNICAM_SD_PAD_SOURCE_METADATA); if (!fmtinfo) {
fmtinfo = &unicam_meta_formats[0];
meta->dataformat = fmtinfo->fourcc;
}
if (type == UNICAM_IMAGE_NODE)
vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
node->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad); if (ret) goto err_unicam_put;
node->dummy_buf.size = UNICAM_DUMMY_BUF_SIZE;
node->dummy_buf_cpu_addr = dma_alloc_coherent(unicam->dev,
node->dummy_buf.size,
&node->dummy_buf.dma_addr,
GFP_KERNEL); if (!node->dummy_buf_cpu_addr) {
dev_err(unicam->dev, "Unable to allocate dummy buffer.\n");
ret = -ENOMEM; goto err_entity_cleanup;
}
unicam_set_default_format(node);
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) {
dev_err(unicam->dev, "Unable to register video device %s\n",
vdev->name); goto err_dma_free;
}
node->registered = true;
ret = media_create_pad_link(&unicam->subdev.sd.entity,
pad_index,
&node->video_dev.entity,
0,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE); if (ret) { /* * No need for cleanup, the caller will unregister the * video device, which will drop the reference on the * device and trigger the cleanup.
*/
dev_err(unicam->dev, "Unable to create pad link for %s\n",
unicam->sensor.subdev->name); return ret;
}
/*
 * NOTE(review): The following disclaimer (translated from German) is website
 * boilerplate accidentally appended to this source file and is not part of
 * the driver; it should be removed:
 * "The information on this web page was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the information provided is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */