/**
 * struct sti_gdp - STI GDP (Graphics Display Plane) per-instance state
 *
 * @plane:           sti_plane structure (embedded base plane)
 * @dev:             driver device
 * @regs:            gdp registers (MMIO base)
 * @clk_pix:         pixel clock for the current gdp
 * @clk_main_parent: gdp parent clock if main path used
 * @clk_aux_parent:  gdp parent clock if aux path used
 * @vtg_field_nb:    callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:     true if the current node processed is the top field
 * @node_list:       array of node lists (one per hardware bank)
 * @vtg:             registered vtg
 */
struct sti_gdp {
	struct sti_plane plane;
	struct device *dev;
	void __iomem *regs;
	struct clk *clk_pix;
	struct clk *clk_main_parent;
	struct clk *clk_aux_parent;
	struct notifier_block vtg_field_nb;
	bool is_curr_top;
	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
	struct sti_vtg *vtg;
};
/*
 * gdp_dbg_ctl - dump the CTL register value in human-readable form.
 * @s:   destination seq_file
 * @val: raw CTL register content
 *
 * Prints the color format name matching the low 5 bits of @val (looked
 * up in gdp_format_to_str[]) and whether the WAIT_NEXT_VSYNC bit is set.
 */
static void gdp_dbg_ctl(struct seq_file *s, int val)
{
	unsigned int i;	/* unsigned: compared against ARRAY_SIZE() (size_t) */

	seq_puts(s, "\tColor:");
	for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
		if (gdp_format_to_str[i].format == (val & 0x1F)) {
			seq_puts(s, gdp_format_to_str[i].name);
			break;
		}
	}
	/* No match found: the format code is unknown to the table */
	if (i == ARRAY_SIZE(gdp_format_to_str))
		seq_puts(s, "");
	seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
}
/*
 * gdp_dbg_vpo - dump the VPO (viewport origin) register value.
 * @s:   destination seq_file
 * @val: raw VPO register content; xdo in the low 16 bits, ydo in the
 *       high 16 bits
 */
static void gdp_dbg_vpo(struct seq_file *s, int val)
{
	seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}
/*
 * gdp_dbg_vps - dump the VPS (viewport stop) register value.
 * @s:   destination seq_file
 * @val: raw VPS register content; xds in the low 16 bits, yds in the
 *       high 16 bits
 */
static void gdp_dbg_vps(struct seq_file *s, int val)
{
	seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}
/*
 * gdp_dbg_size - dump a packed size register value as "width x height".
 * @s:   destination seq_file
 * @val: raw register content; width in the low 16 bits, height in the
 *       high 16 bits
 */
static void gdp_dbg_size(struct seq_file *s, int val)
{
	seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
}
for (i = 0; i < GDP_NODE_NB_BANK; i++) { if (gdp->node_list[i].top_field_paddr == val) {
base = gdp->node_list[i].top_field; break;
} if (gdp->node_list[i].btm_field_paddr == val) {
base = gdp->node_list[i].btm_field; break;
}
}
if (base)
seq_printf(s, "\tVirt @: %p", base);
}
/*
 * gdp_dbg_ppt - dump the PPT (properties) register value.
 * @s:   destination seq_file
 * @val: raw PPT register content
 *
 * Flags the node as hidden when the GAM_GDP_PPT_IGNORE bit is set.
 */
static void gdp_dbg_ppt(struct seq_file *s, int val)
{
	if (val & GAM_GDP_PPT_IGNORE)
		seq_puts(s, "\tNot displayed on mixer!");
}
/*
 * gdp_dbg_mst - dump the MST (status) register value.
 * @s:   destination seq_file
 * @val: raw MST register content; bit 0 reports a buffer underflow
 */
static void gdp_dbg_mst(struct seq_file *s, int val)
{
	if (val & 1)
		seq_puts(s, "\tBUFFER UNDERFLOW!");
}
seq_puts(s, "\n\n"); if (!crtc)
seq_puts(s, " Not connected to any DRM CRTC\n"); else
seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
staticint sti_gdp_fourcc2format(int fourcc)
{ switch (fourcc) { case DRM_FORMAT_XRGB8888: return GDP_RGB888_32; case DRM_FORMAT_XBGR8888: return GDP_XBGR8888; case DRM_FORMAT_ARGB8888: return GDP_ARGB8888; case DRM_FORMAT_ABGR8888: return GDP_ABGR8888; case DRM_FORMAT_ARGB4444: return GDP_ARGB4444; case DRM_FORMAT_ARGB1555: return GDP_ARGB1555; case DRM_FORMAT_RGB565: return GDP_RGB565; case DRM_FORMAT_RGB888: return GDP_RGB888;
} return -1;
}
/*
 * sti_gdp_get_alpharange - select the alpha range for a GDP format.
 * @format: GDP_* hardware format code
 *
 * RETURNS:
 * GAM_GDP_ALPHARANGE_255 for the formats carrying a full 8-bit alpha
 * channel, 0 otherwise.
 */
static int sti_gdp_get_alpharange(int format)
{
	switch (format) {
	case GDP_ARGB8565:
	case GDP_ARGB8888:
	case GDP_ABGR8888:
		return GAM_GDP_ALPHARANGE_255;
	}
	return 0;
}
/** * sti_gdp_get_free_nodes * @gdp: gdp pointer * * Look for a GDP node list that is not currently read by the HW. * * RETURNS: * Pointer to the free GDP node list
*/ staticstruct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{ int hw_nvn; unsignedint i;
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET); if (!hw_nvn) goto end;
for (i = 0; i < GDP_NODE_NB_BANK; i++) if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
(hw_nvn != gdp->node_list[i].top_field_paddr)) return &gdp->node_list[i];
/* in hazardous cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
sti_plane_to_str(&gdp->plane), hw_nvn);
end: return &gdp->node_list[0];
}
/**
 * sti_gdp_get_current_nodes
 * @gdp: gdp pointer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list, or NULL when the NVN register
 * matches no known node.
 */
static struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
	int hw_nvn;
	unsigned int i;

	/* NVN register holds the physical address of the node in use */
	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
	if (!hw_nvn)
		goto end;

	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
		    (hw_nvn == gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

end:
	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
			 hw_nvn, sti_plane_to_str(&gdp->plane));

	return NULL;
}
/**
 * sti_gdp_disable
 * @gdp: gdp pointer
 *
 * Disable a GDP: mark every node as ignored by the mixer, unregister
 * the VTG notifier, and gate the pixel clock.
 */
static void sti_gdp_disable(struct sti_gdp *gdp)
{
	unsigned int i;

	/* Set the nodes as 'to be ignored on mixer' */
	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
	}

	if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

	if (gdp->clk_pix)
		clk_disable_unprepare(gdp->clk_pix);
	/*
	 * NOTE(review): the function body appears truncated in this copy;
	 * the closing brace is restored here. Verify against upstream
	 * whether additional teardown (e.g. resetting the plane status)
	 * belongs at the end of this function.
	 */
}
/** * sti_gdp_field_cb * @nb: notifier block * @event: event message * @data: private data * * Handle VTG top field and bottom field event. * * RETURNS: * 0 on success.
*/ staticint sti_gdp_field_cb(struct notifier_block *nb, unsignedlong event, void *data)
{ struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
if (gdp->plane.status == STI_PLANE_FLUSHING) { /* disable need to be synchronize on vsync event */
DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
sti_plane_to_str(&gdp->plane));
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n"); return;
}
memset(base, 0, size);
for (i = 0; i < GDP_NODE_NB_BANK; i++) { if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n"); return;
}
gdp->node_list[i].top_field = base;
gdp->node_list[i].top_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n"); return;
}
gdp->node_list[i].btm_field = base;
gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
}
if (of_device_is_compatible(np, "st,stih407-compositor")) { /* GDP of STiH407 chip have its own pixel clock */ char *clk_name;
switch (gdp->plane.desc) { case STI_GDP_0:
clk_name = "pix_gdp1"; break; case STI_GDP_1:
clk_name = "pix_gdp2"; break; case STI_GDP_2:
clk_name = "pix_gdp3"; break; case STI_GDP_3:
clk_name = "pix_gdp4"; break; default:
DRM_ERROR("GDP id not recognized\n"); return;
}
gdp->clk_pix = devm_clk_get(gdp->dev, clk_name); if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent"); if (IS_ERR(gdp->clk_main_parent))
DRM_ERROR("Cannot get main_parent clock\n");
gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent"); if (IS_ERR(gdp->clk_aux_parent))
DRM_ERROR("Cannot get aux_parent clock\n");
}
}
format = sti_gdp_fourcc2format(fb->format->format); if (format == -1) {
DRM_ERROR("Format not supported by GDP %.4s\n",
(char *)&fb->format->format); return -EINVAL;
}
if (!drm_fb_dma_get_gem_obj(fb, 0)) {
DRM_ERROR("Can't get DMA GEM object for fb\n"); return -EINVAL;
}
/* Set gdp clock */ if (mode->clock && gdp->clk_pix) { struct clk *clkp; int rate = mode->clock * 1000; int res;
/* * According to the mixer used, the gdp pixel clock * should have a different parent clock.
*/ if (mixer->id == STI_MIXER_MAIN)
clkp = gdp->clk_main_parent; else
clkp = gdp->clk_aux_parent;
if (clkp)
clk_set_parent(gdp->clk_pix, clkp);
res = clk_set_rate(gdp->clk_pix, rate); if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
rate); return -EINVAL;
}
}
/* Update the NVN field of the 'right' field of the current GDP node * (being used by the HW) with the address of the updated ('free') top * field GDP node. * - In interlaced mode the 'right' field is the bottom field as we * update frames starting from their top field * - In progressive mode, we update both bottom and top fields which * are equal nodes. * At the next VSYNC, the updated node list will be used by the HW.
*/
curr_list = sti_gdp_get_current_nodes(gdp);
dma_updated_top = list->top_field_paddr;
dma_updated_btm = list->btm_field_paddr;
if (!curr_list) { /* First update or invalid node should directly write in the
* hw register */
DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
sti_plane_to_str(plane));
if (mode->flags & DRM_MODE_FLAG_INTERLACE) { if (gdp->is_curr_top) { /* Do not update in the middle of the frame, but * postpone the update after the bottom field has
* been displayed */
curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
} else { /* Direct update to avoid one frame delay */
writel(dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
}
} else { /* Direct update for progressive to avoid one frame delay */
writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.