/* * These are raw formats used in Intel's third generation of * Image Processing Unit known as IPU3. * 10bit raw bayer packed, 32 bytes for every 25 pixels, * last LSB 6 bits unused.
*/ staticconststruct ipu3_cio2_fmt formats[] = {
{ /* put default entry at beginning */
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
.mipicode = 0x2b,
.bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_Y10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_Y10,
.mipicode = 0x2b,
.bpp = 10,
},
};
/* * cio2_find_format - lookup color format by fourcc or/and media bus code * @pixelformat: fourcc to match, ignored if null * @mbus_code: media bus code to match, ignored if null
*/ staticconststruct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat, const u32 *mbus_code)
{ unsignedint i;
for (i = 0; i < ARRAY_SIZE(formats); i++) { if (pixelformat && *pixelformat != formats[i].fourcc) continue; if (mbus_code && *mbus_code != formats[i].mbus_code) continue;
return &formats[i];
}
return NULL;
}
staticinline u32 cio2_bytesperline(constunsignedint width)
{ /* * 64 bytes for every 50 pixels, the line length * in bytes is multiple of 64 (line end alignment).
*/ return DIV_ROUND_UP(width, 50) * 64;
}
/*
 * NOTE(review): fragment — the opening of the enclosing function is not
 * visible in this chunk; it appears to be the dummy-FBPT allocation helper
 * (presumably cio2_fbpt_init_dummy()) — confirm against the full file.
 */
/* Scratch data page that unused LOP entries can safely point at. */
cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_page_bus_addr,
GFP_KERNEL);
/* Scratch List-Of-Pointers page; filled below with the scratch page PFN. */
cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_lop_bus_addr,
GFP_KERNEL); if (!cio2->dummy_page || !cio2->dummy_lop) {
/* Either allocation failed: release whichever succeeded and bail out. */
cio2_fbpt_exit_dummy(cio2); return -ENOMEM;
} /* * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each * Initialize each entry to dummy_page bus base address.
*/ for (i = 0; i < CIO2_LOP_ENTRIES; i++)
cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
return 0;
}
/*
 * cio2_fbpt_entry_enable - publish an FBPT entry to the DMA engine
 * @cio2: CIO2 device
 * @entry: first of the CIO2_MAX_LOPS sub-entries describing one buffer
 */
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA(device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion
	 * Valid bit is applicable only to 1st entry
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}
/* Initialize fpbt entries to point to dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	/* Point every LOP slot at the pre-filled scratch LOP page. */
	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);
}
/* Initialize fpbt entries to point to a given buffer */ staticvoid cio2_fbpt_entry_init_buf(struct cio2_device *cio2, struct cio2_buffer *b, struct cio2_fbpt_entry
entry[CIO2_MAX_LOPS])
{ struct vb2_buffer *vb = &b->vbb.vb2_buf; unsignedint length = vb->planes[0].length; int remaining, i;
/* Payload may start mid-page: record the byte offset within page 0. */
entry[0].first_entry.first_page_offset = b->offset;
/* Total span covered by DMA = payload length plus the leading offset. */
remaining = length + entry[0].first_entry.first_page_offset;
entry[1].second_entry.num_of_pages = PFN_UP(remaining); /* * last_page_available_bytes has the offset of the last byte in the * last page which is still accessible by DMA. DMA cannot access * beyond this point. Valid range for this is from 0 to 4095. * 0 indicates 1st byte in the page is DMA accessible. * 4095 (PAGE_SIZE - 1) means every single byte in the last page * is available for DMA transfer.
*/
remaining = offset_in_page(remaining) ?: PAGE_SIZE;
entry[1].second_entry.last_page_available_bytes = remaining - 1; /* Fill FBPT */
remaining = length;
i = 0; while (remaining > 0) {
/* Each LOP page addresses CIO2_LOP_ENTRIES data pages of payload. */
entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
entry++;
i++;
}
/* * The first not meaningful FBPT entry should point to a valid LOP
*/
entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
/*
 * NOTE(review): the function's closing brace (and presumably a final
 * cio2_fbpt_entry_enable() call) is not visible in this chunk — the
 * definition continues past this view.
 */
/* * The CSI2 receiver has several parameters affecting * the receiver timings. These depend on the MIPI bus frequency * F in Hz (sensor transmitter rate) as follows: * register value = (A/1e9 + B * UI) / COUNT_ACC * where * UI = 1 / (2 * F) in seconds * COUNT_ACC = counter accuracy in seconds * For IPU3 COUNT_ACC = 0.0625 * * A and B are coefficients from the table below, * depending whether the register minimum or maximum value is * calculated. * Minimum Maximum * Clock lane A B A B * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 * Data lanes * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 * * We use the minimum values of both A and B.
*/
/* * shift for keeping value range suitable for 32-bit integer arithmetic
*/ #define LIMIT_SHIFT 8
/*
 * NOTE(review): fragment — the header of the timing-calculation function
 * is not visible here; r, a, b, accinv, uiinv, freq and def are
 * presumably declared/received there — confirm against the full file.
 */
if (WARN_ON(freq <= 0 || freq > S32_MAX)) return def; /* * b could be 0, -2 or -8, so |accinv * b| is always * less than (1 << ds) and thus |r| < 500000000.
*/
r = accinv * b * (uiinv >> LIMIT_SHIFT);
/* Divide by the bus frequency; freq fits in s32 per the WARN_ON above. */
r = r / (s32)freq; /* max value of a is 95 */
r += accinv * a;
return r;
};
/* Calculate the delay value for termination enable of clock lane HS Rx */ staticint cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q, struct cio2_csi2_timing *timing, unsignedint bpp, unsignedint lanes)
{ struct device *dev = &cio2->pci_dev->dev; struct media_pad *src_pad;
s64 freq;
/* Locate the single remote source pad feeding this queue's subdev. */
src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity); if (IS_ERR(src_pad)) {
dev_err(dev, "can't get source pad of %s (%ld)\n",
q->subdev.name, PTR_ERR(src_pad)); return PTR_ERR(src_pad);
}
/*
 * NOTE(review): fragment — the code that derives freq and fills in
 * timing->clk_*/dat_* values is missing between here and the debug
 * prints below, and the function's closing brace is not visible.
 */
dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
/*
 * NOTE(review): stitched fragment — the lines below appear to come from
 * at least two functions (a hardware-init path and a hardware-exit path);
 * neither function header is visible in this chunk. Variables base, q, i,
 * ret, value and dev are declared outside this view.
 */
/* Program platform clock parameters. */
writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
/* Configure MIPI backend */ for (i = 0; i < NUM_VCS; i++)
writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
/* There are 16 short packet LUT entry */ for (i = 0; i < 16; i++)
writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
/* Zero every DMA channel's base address and control registers. */
for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
writel(0, base + CIO2_REG_CDMABA(i));
writel(0, base + CIO2_REG_CDMAC0(i));
writel(0, base + CIO2_REG_CDMAC1(i));
}
/* Enable DMA */
writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
/* Clear interrupts */
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
writel(~0, base + CIO2_REG_INT_STS);
/* Enable devices, starting from the last device in the pipe */
writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
/* Halt DMA */
writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
/* Poll until the hardware confirms the halt, up to 2 seconds. */
ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
value, value & CIO2_CDMAC0_DMA_HALTED,
4000, 2000000); if (ret)
dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
/* Abort in-flight frames on every port. */
for (i = 0; i < CIO2_NUM_PORTS; i++) {
writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
}
}
/*
 * NOTE(review): fragment — buffer-completion handling; the enclosing
 * function's header and the remainder of the do-loop are outside this
 * view. entry, q, dev and dma_chan are declared elsewhere.
 */
/* A still-VALID first entry means the DMA has not completed anything. */
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS]; if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
dev_warn(dev, "no ready buffers found on DMA channel %u\n",
dma_chan); return;
}
/* Find out which buffer(s) are ready */ do { struct cio2_buffer *b;
b = q->bufs[q->bufs_first]; if (b) { unsignedint received = entry[1].second_entry.num_of_bytes; unsignedlong payload =
vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
/*
 * cio2_queue_event_sof - queue a V4L2 frame-sync event for @q's subdev
 * @cio2: CIO2 device
 * @q: queue whose current frame sequence number is reported
 */
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}
/* CSI-2 receiver error descriptions, indexed by IRQ status bit position. */
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};
/*
 * NOTE(review): stitched fragment — the IOOE branch below and the
 * read/ack loop that follows appear to come from two different IRQ
 * routines; neither function header is visible in this chunk.
 */
if (int_status & CIO2_INT_IOOE) { /* * Interrupt on Output Error: * 1) SRAM is full and FS received, or * 2) An invalid bit detected by DMA.
*/
u32 oe_status, oe_clear;
/* Read the raw interrupt status; nothing pending means spurious IRQ. */
int_status = readl(base + CIO2_REG_INT_STS);
dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status); if (!int_status) return IRQ_NONE;
/* Ack and handle repeatedly until the status register reads clear. */
do {
writel(int_status, base + CIO2_REG_INT_STS);
cio2_irq_handle_once(cio2, int_status);
int_status = readl(base + CIO2_REG_INT_STS); if (int_status)
dev_dbg(dev, "pending status 0x%x\n", int_status);
} while (int_status);
/*
 * NOTE(review): fragment — the header of the enclosing buffer-queue
 * function (presumably cio2_vb2_buf_queue()) is not visible; flags,
 * fbpt_rp, bufs_queued, next, b, vb, q, dev, i and j come from there.
 */
/* * This code queues the buffer to the CIO2 DMA engine, which starts * running once streaming has started. It is possible that this code * gets pre-empted due to increased CPU load. Upon this, the driver * does not get an opportunity to queue new buffers to the CIO2 DMA * engine. When the DMA engine encounters an FBPT entry without the * VALID bit set, the DMA engine halts, which requires a restart of * the DMA engine and sensor, to continue streaming. * This is not desired and is highly unlikely given that there are * 32 FBPT entries that the DMA engine needs to process, to run into * an FBPT entry, without the VALID bit set. We try to mitigate this * by disabling interrupts for the duration of this queueing.
*/
local_irq_save(flags);
/* * fbpt_rp is the fbpt entry that the dma is currently working * on, but since it could jump to next entry at any time, * assume that we might already be there.
*/
fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
if (bufs_queued <= 1 || fbpt_rp == next) /* Buffers were drained */
next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
for (i = 0; i < CIO2_MAX_BUFFERS; i++) { /* * We have allocated CIO2_MAX_BUFFERS circularly for the * hw, the user has requested N buffer queue. The driver * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever * user queues a buffer, there necessarily is a free buffer.
*/ if (!q->bufs[next]) {
/* Free slot found: install the buffer and publish its FBPT entry. */
q->bufs[next] = b;
entry = &q->fbpt[next * CIO2_MAX_LOPS];
cio2_fbpt_entry_init_buf(cio2, b, entry);
local_irq_restore(flags);
q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS; for (j = 0; j < vb->num_planes; j++)
vb2_set_plane_payload(vb, j,
q->format.plane_fmt[j].sizeimage); return;
}
dev_dbg(dev, "entry %i was full!\n", next);
next = (next + 1) % CIO2_MAX_BUFFERS;
}
/* No free slot after a full scan: fail the buffer back to vb2. */
local_irq_restore(flags);
dev_err(dev, "error: all cio2 entries were full!\n");
atomic_dec(&q->bufs_queued);
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b = to_cio2_buffer(vb);
	unsigned int i;

	/* Free LOP table */
	for (i = 0; i < CIO2_MAX_LOPS; i++) {
		if (b->lop[i])
			dma_free_coherent(dev, PAGE_SIZE,
					  b->lop[i], b->lop_bus_addr[i]);
	}
}
/*
 * NOTE(review): fragment — format-negotiation code; the enclosing
 * try/set-format function's header is not visible. mpix is declared
 * outside this view.
 */
/* Fall back to the default (first) format when the fourcc is unknown. */
fmt = cio2_find_format(&mpix->pixelformat, NULL); if (!fmt)
fmt = &formats[0];
/* Only supports up to 4224x3136 */ if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
mpix->width = CIO2_IMAGE_MAX_WIDTH; if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
mpix->height = CIO2_IMAGE_MAX_HEIGHT;
/* .complete() is called after all subdevices have been located */ staticint cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{ struct cio2_device *cio2 = to_cio2_device(notifier); struct sensor_async_subdev *s_asd; struct v4l2_async_connection *asd; struct cio2_queue *q; int ret;
/*
 * NOTE(review): stitched fragment — the notifier registration, the
 * pad-link creation and the ipu_bridge call below look like they belong
 * to different functions than the header above; confirm against the
 * full file. The closing brace of cio2_notifier_complete() is missing.
 */
/* * Proceed even without sensors connected to allow the device to * suspend.
*/
cio2->notifier.ops = &cio2_async_ops;
ret = v4l2_async_nf_register(&cio2->notifier); if (ret)
dev_err(dev, "failed to register async notifier : %d\n", ret);
/* Create link from CIO2 subdev to output node */
r = media_create_pad_link(
&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (r) goto fail_link;
/* * On some platforms no connections to sensors are defined in firmware, * if the device has no endpoints then we can try to build those as * software_nodes parsed from SSDB.
*/
r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); if (r) return r;
/*
 * NOTE(review): fragment — tail of a runtime-resume handler; the
 * function header is not visible in this chunk.
 */
/* Request removal from the D0i3 low-power state. */
writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
dev_dbg(dev, "cio2 runtime resume.\n");
return 0;
}
/* * Helper function to advance all the elements of a circular buffer by "start" * positions
*/ staticvoid arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{ struct {
size_t begin, end;
} arr[2] = {
{ 0, start - 1 },
{ start, elems - 1 },
};
/* Number of elements in a chunk (inclusive bounds). */
#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
/* Loop as long as we have out-of-place entries */ while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
size_t size0, i;
/* * Find the number of entries that can be arranged on this * iteration.
*/
size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
/* Swap the entries in two parts of the array. */ for (i = 0; i < size0; i++) {
u8 *d = ptr + elem_size * (arr[1].begin + i);
u8 *s = ptr + elem_size * (arr[0].begin + i);
size_t j;
/*
 * NOTE(review): the byte-wise swap of *d and *s that should use j
 * appears to be missing from this chunk — the fragment looks
 * truncated here; confirm against the full file.
 */
if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) { /* The end of the first array remains unarranged. */
arr[0].begin += size0;
} else { /* * The first array is fully arranged so we proceed * handling the next one.
*/
arr[0].begin = arr[1].begin;
arr[0].end = arr[1].begin + size0 - 1;
arr[1].begin += size0;
}
}
}
/*
 * NOTE(review): fragment — tail of a suspend-path routine; the function
 * header is not visible in this chunk.
 */
/* * DMA clears the valid bit when accessing the buffer. * When stopping stream in suspend callback, some of the buffers * may be in invalid state. After resume, when DMA meets the invalid * buffer, it will halt and stop receiving new data. * To avoid DMA halting, set the valid bit for all buffers in FBPT.
*/ for (i = 0; i < CIO2_MAX_BUFFERS; i++)
cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}
/*
 * NOTE(review): stitched fragment — FBPT rearrangement (suspend side)
 * followed by resume/stream-restart code; neither enclosing function
 * header nor the final closing brace is visible in this chunk.
 */
/* * Upon resume, hw starts to process the fbpt entries from beginning, * so relocate the queued buffs to the fbpt head before suspend.
*/
cio2_fbpt_rearrange(cio2, q);
q->bufs_first = 0;
q->bufs_next = 0;
dev_dbg(dev, "cio2 resume\n"); if (!cio2->streaming) return 0; /* Start stream */
/* Power the device back up before touching any registers. */
r = pm_runtime_force_resume(dev); if (r < 0) {
dev_err(dev, "failed to set power %d\n", r); return r;
}
r = cio2_hw_init(cio2, q); if (r) {
dev_err(dev, "fail to init cio2 hw\n"); return r;
}
/* Restart the sensor last; roll back hw init if it fails. */
r = v4l2_subdev_call(q->sensor, video, s_stream, 1); if (r) {
dev_err(dev, "fail to start sensor streaming\n");
cio2_hw_exit(cio2, q);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.