/* bdisp_op_cfg - blit operation configuration derived from src/dst formats */
struct bdisp_op_cfg {
	bool cconv;          /* RGB <-> YUV color conversion required */
	bool hflip;          /* horizontal flip */
	bool vflip;          /* vertical flip */
	bool wide;           /* source wider than MAX_SRC_WIDTH */
	bool scale;          /* scaling requested */
	u16 h_inc;           /* horizontal increment, 6.10 fixed-point */
	u16 v_inc;           /* vertical increment, 6.10 fixed-point */
	bool src_interlaced; /* source buffer is interlaced */
	u8 src_nbp;          /* number of planes of the source */
	bool src_yuv;        /* source uses a YUV color format */
	bool src_420;        /* source is 4:2:0 chroma subsampled */
	u8 dst_nbp;          /* number of planes of the destination */
	bool dst_yuv;        /* destination uses a YUV color format */
	bool dst_420;        /* destination is 4:2:0 chroma subsampled */
};
/*
 * NOTE(review): fragment — the opening of the enclosing reset function is
 * not visible in this chunk; only its polling tail appears below.
 */
/* Wait for reset done: poll status until the blitter reports idle */
for (i = 0; i < POLL_RST_MAX; i++) {
	if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
		break; /* reset completed */
	/* busy-wait between polls (delay constant is in milliseconds) */
	udelay(POLL_RST_DELAY_MS * 1000);
}
if (i == POLL_RST_MAX)
	dev_err(bdisp->dev, "Reset timeout\n");

/* -EAGAIN if the device never reached idle, 0 otherwise */
return (i == POLL_RST_MAX) ? -EAGAIN : 0;
}
/**
 * bdisp_hw_get_and_clear_irq
 * @bdisp: bdisp entity
 *
 * Read then reset interrupt status
 *
 * RETURNS:
 * 0 if expected interrupt was raised.
 *
 * NOTE(review): fragment — the success path (clearing/masking the
 * expected interrupt and returning 0) is cut off after the 'if' block
 * in this chunk.
 */
int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp)
{
	u32 its;

	its = readl(bdisp->regs + BLT_ITS);

	/* Check for the only expected IT: LastNode of AQ1 */
	if (!(its & BLT_ITS_AQ1_LNA)) {
		dev_dbg(bdisp->dev, "Unexpected IT status: 0x%08X\n", its);
		/* Acknowledge the unexpected interrupt(s) anyway */
		writel(its, bdisp->regs + BLT_ITS);
		return -1;
	}
/* Allocate all the nodes within a single memory page */
base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
		       GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
if (!base) {
	dev_err(dev, "%s no mem\n", __func__);
	return -ENOMEM;
}

memset(base, 0, node_size * MAX_NB_NODE);

/* Hand each node its slice (virt + DMA address) of the allocation */
for (i = 0; i < MAX_NB_NODE; i++) {
	ctx->node[i] = base;
	ctx->node_paddr[i] = paddr;
	dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
		&paddr);
	base += node_size;
	paddr += node_size;
}

/*
 * NOTE(review): the lines below belong to a different (filter-free)
 * function — the text between the two fragments is missing from this
 * chunk. Freeing via bdisp_h_filter[0] presumably works because all
 * filters share one allocation (see "single memory page" comment in the
 * filter alloc routine) — TODO confirm against the full file.
 */
if (bdisp_h_filter[0].virt)
	dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
		       bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
}
/**
 * bdisp_hw_alloc_filters
 * @dev: device
 *
 * Allocate a single DMA memory area holding every horizontal and vertical
 * filter coefficient table, copy the coefficients into it, and record each
 * table's virtual and DMA addresses (plus its scaling range) in the
 * bdisp_h_filter / bdisp_v_filter arrays.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the DMA allocation fails
 */
int bdisp_hw_alloc_filters(struct device *dev)
{
	/* fixed fused token: 'unsignedint' -> 'unsigned int' */
	unsigned int i, size;
	void *base;
	dma_addr_t paddr;

	/* Allocate all the filters within a single memory page */
	size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
	if (!base)
		return -ENOMEM;

	/* Setup filter addresses: each table gets a slice of the area */
	for (i = 0; i < NB_H_FILTER; i++) {
		bdisp_h_filter[i].min = bdisp_h_spec[i].min;
		bdisp_h_filter[i].max = bdisp_h_spec[i].max;
		memcpy(base, bdisp_h_spec[i].coef, BDISP_HF_NB);
		bdisp_h_filter[i].virt = base;
		bdisp_h_filter[i].paddr = paddr;
		base += BDISP_HF_NB;
		paddr += BDISP_HF_NB;
	}

	for (i = 0; i < NB_V_FILTER; i++) {
		bdisp_v_filter[i].min = bdisp_v_spec[i].min;
		bdisp_v_filter[i].max = bdisp_v_spec[i].max;
		memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
		bdisp_v_filter[i].virt = base;
		bdisp_v_filter[i].paddr = paddr;
		base += BDISP_VF_NB;
		paddr += BDISP_VF_NB;
	}

	/* restored: function tail (promised "0 on success") was truncated */
	return 0;
}
/**
 * bdisp_hw_color_format
 * @pixelformat: v4l2 pixel format
 *
 * Convert a v4l2 pixel format into its bdisp target-type register value.
 *
 * RETURNS:
 * bdisp pixel format (TTY color field, plus endianness/alpha flags)
 */
static u32 bdisp_hw_color_format(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
		return BDISP_YUV_3B << BLT_TTY_COL_SHIFT;
	case V4L2_PIX_FMT_NV12:
		return (BDISP_NV12 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
	case V4L2_PIX_FMT_RGB565:
		return BDISP_RGB565 << BLT_TTY_COL_SHIFT;
	case V4L2_PIX_FMT_XBGR32:
		/* This V4L format actually refers to xRGB */
		return BDISP_XRGB8888 << BLT_TTY_COL_SHIFT;
	case V4L2_PIX_FMT_RGB24:
		/* RGB888 format */
		return (BDISP_RGB888 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
	case V4L2_PIX_FMT_ABGR32:
		/* This V4L format actually refers to ARGB */
	default:
		return (BDISP_ARGB8888 << BLT_TTY_COL_SHIFT) |
		       BLT_TTY_ALPHA_R;
	}
}
/**
 * bdisp_hw_build_node
 * @ctx: device context
 * @cfg: operation configuration
 * @node: node to be set
 * @t_plan: whether the node refers to a RGB/Y or a CbCr plane
 * @src_x_offset: x offset in the source image
 *
 * Build a node
 *
 * RETURNS:
 * None
 *
 * NOTE(review): truncated fragment — the body jumps from the stride
 * clamping straight to the Src3 setup; the intervening code is missing
 * from this chunk. 'staticvoid' below is a fused 'static void' from the
 * extraction, left untouched because this edit only adds comments.
 */
staticvoid bdisp_hw_build_node(struct bdisp_ctx *ctx, struct bdisp_op_cfg *cfg, struct bdisp_node *node, enum bdisp_target_plan t_plan, int src_x_offset)
{
	struct bdisp_frame *src = &ctx->src;
	struct bdisp_frame *dst = &ctx->dst;
	u16 h_inc, v_inc, yh_inc, yv_inc;
	struct v4l2_rect src_rect = src->crop;
	struct v4l2_rect dst_rect = dst->crop;
	int dst_x_offset;
	s32 dst_width = dst->crop.width;
	u32 src_fmt, dst_fmt;
	const u32 *ivmx;

	dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);

	memset(node, 0, sizeof(*node));

	/* Adjust src and dst areas wrt src_x_offset */
	src_rect.left += src_x_offset;
	src_rect.width -= src_x_offset;
	/* Each vertical stride is clamped to the HW max source width */
	src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);

	if ((t_plan != BDISP_CBCR) && cfg->src_420) {
		/* No chroma upsampling for output RGB / Y plane */
		/* Src3 spans twice the Src2 area (4:2:0 luma vs chroma) */
		node->s3xy = node->s2xy * 2;
		node->s3sz = node->s2sz * 2;
	} else {
		/* No need to read Y (Src3) when writing Chroma */
		node->s3ty |= BLT_S3TY_BLANK_ACC;
		node->s3xy = node->s2xy;
		node->s3sz = node->s2sz;
	}
}
/*
 * NOTE(review): continuation fragment of bdisp_hw_build_node — the
 * enclosing function's opening is not visible here, and 'elseif' below is
 * a fused 'else if' from the extraction, left untouched because this edit
 * only adds comments.
 */
/* Resize (scale OR 4:2:0: chroma up/downsampling) */
if (node->ins & BLT_INS_SCALE) {
	/* no need to compute Y when writing CbCr from RGB input */
	bool skip_y = (t_plan == BDISP_CBCR) && !cfg->src_yuv;

	/* FCTL: select scale vs sample mode, optionally for Y as well */
	if (cfg->scale) {
		node->fctl = BLT_FCTL_HV_SCALE;
		if (!skip_y)
			node->fctl |= BLT_FCTL_Y_HV_SCALE;
	} else {
		node->fctl = BLT_FCTL_HV_SAMPLE;
		if (!skip_y)
			node->fctl |= BLT_FCTL_Y_HV_SAMPLE;
	}

	/* RSF - Chroma may need to be up/downsampled */
	h_inc = cfg->h_inc;
	v_inc = cfg->v_inc;
	if (!cfg->src_420 && cfg->dst_420 && (t_plan == BDISP_CBCR)) {
		/* RGB to 4:2:0 for Chroma: downsample */
		h_inc *= 2;
		v_inc *= 2;
	} elseif (cfg->src_420 && !cfg->dst_420) {
		/* 4:2:0: to RGB: upsample */
		h_inc /= 2;
		v_inc /= 2;
	}

	/* 6.10 fixed-point increments: vertical in the upper half-word */
	node->rsf = v_inc << 16 | h_inc;
/**
 * bdisp_hw_build_all_nodes
 * @ctx: device context
 *
 * Build all the nodes for the blitter operation. The source is split into
 * vertical strides of MAX_SRC_WIDTH (HW constraint); each stride produces
 * one RGB/Y node and, for multi-plane destinations, an additional CbCr
 * node. Nodes are chained through their next-instruction pointer (nip).
 *
 * RETURNS:
 * 0 on success, -EINVAL if a node buffer is missing or the operation
 * configuration cannot be determined
 */
static int bdisp_hw_build_all_nodes(struct bdisp_ctx *ctx)
{
	struct bdisp_op_cfg cfg;
	/* fixed fused tokens: 'staticint' / 'unsignedint' */
	unsigned int i, nid = 0;
	int src_x_offset = 0;

	/* All node buffers must have been allocated beforehand */
	for (i = 0; i < MAX_NB_NODE; i++)
		if (!ctx->node[i]) {
			dev_err(ctx->bdisp_dev->dev, "node %d is null\n", i);
			return -EINVAL;
		}

	/* Get configuration (scale, flip, ...) */
	if (bdisp_hw_get_op_cfg(ctx, &cfg))
		return -EINVAL;

	/* Split source in vertical strides (HW constraint) */
	for (i = 0; i < MAX_VERTICAL_STRIDES; i++) {
		/* Build RGB/Y node and link it to the previous node */
		bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
				    cfg.dst_nbp == 1 ? BDISP_RGB : BDISP_Y,
				    src_x_offset);
		if (nid)
			ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
		nid++;

		/* Build additional Cb(Cr) node, link it to the previous one */
		if (cfg.dst_nbp > 1) {
			bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
					    BDISP_CBCR, src_x_offset);
			ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
			nid++;
		}

		/* Next stride until full width covered */
		src_x_offset += MAX_SRC_WIDTH;
		if (src_x_offset >= ctx->src.crop.width)
			break;
	}

	/* Mark last node as the last */
	ctx->node[nid - 1]->nip = 0;

	return 0;
}
/**
 * bdisp_hw_save_request
 * @ctx: device context
 *
 * Save a copy of the request and of the built nodes
 *
 * RETURNS:
 * None
 *
 * NOTE(review): truncated fragment — only the local declarations are
 * visible in this chunk; the body is cut off. 'staticvoid' is a fused
 * 'static void' from the extraction, left untouched because this edit
 * only adds comments.
 */
staticvoid bdisp_hw_save_request(struct bdisp_ctx *ctx)
{
	/* debug copies kept in the device's dbg area */
	struct bdisp_node **copy_node = ctx->bdisp_dev->dbg.copy_node;
	struct bdisp_request *request = &ctx->bdisp_dev->dbg.copy_request;
	struct bdisp_node **node = ctx->node;
	int i;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.