staticconst u64 tegra_shared_plane_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5), /* * The GPU sector layout is only supported on Tegra194, but these will * be filtered out later on by ->format_mod_supported() on SoCs where * it isn't supported.
*/
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, /* sentinel */
DRM_FORMAT_MOD_INVALID
};
/*
 * Take a reference on a window group. On the 0 -> 1 transition, resume the
 * parent host1x client and release the window group from reset; the use
 * count is only incremented when that succeeded, so a failed resume leaves
 * the group unreferenced.
 *
 * Returns 0 on success or a negative error code from host1x_client_resume().
 */
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
	int err = 0;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 0) {
		err = host1x_client_resume(wgrp->parent);
		if (err < 0)
			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
		else
			reset_control_deassert(wgrp->rst);
	}

	if (err == 0)
		wgrp->usecount++;

	mutex_unlock(&wgrp->lock);

	return err;
}
/*
 * Drop a reference on a window group. On the 1 -> 0 transition, put the
 * window group back into reset and suspend the parent host1x client. A
 * failure to assert the reset is only logged; the suspend still proceeds
 * and the use count is decremented unconditionally.
 */
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 1) {
		if (reset_control_assert(wgrp->rst) < 0)
			pr_err("failed to assert reset for window group %u\n",
			       wgrp->index);

		host1x_client_suspend(wgrp->parent);
	}

	wgrp->usecount--;

	mutex_unlock(&wgrp->lock);
}
/*
 * Enable all window groups of the display hub.
 *
 * Returns 0 on success. Errors from enabling individual window groups are
 * currently not propagated.
 *
 * NOTE(review): as extracted, this span was a single int function containing
 * both an enable loop and a disable loop and no return statement — falling
 * off the end of a non-void function whose value is used is undefined
 * behavior, and the second loop would immediately undo the first. The two
 * XXX comments mark these as two separate routines (prepare/cleanup) that
 * were fused together; they are restored as such here.
 */
int tegra_display_hub_prepare(struct tegra_display_hub *hub)
{
	unsigned int i;

	/*
	 * XXX Enabling/disabling windowgroups needs to happen when the owner
	 * display controller is disabled. There's currently no good point at
	 * which this could be executed, so unconditionally enable all window
	 * groups for now.
	 */
	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		/* Skip orphaned window group whose parent DC is disabled */
		if (wgrp->parent)
			tegra_windowgroup_enable(wgrp);
	}

	return 0;
}

/*
 * Disable all window groups of the display hub, undoing
 * tegra_display_hub_prepare().
 */
void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
{
	unsigned int i;

	/*
	 * XXX Remove this once window groups can be more fine-grainedly
	 * enabled and disabled.
	 */
	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		/* Skip orphaned window group whose parent DC is disabled */
		if (wgrp->parent)
			tegra_windowgroup_disable(wgrp);
	}
}
value = tegra_dc_readl(dc, offset);
owner = value & OWNER_MASK;
if (new && (owner != OWNER_MASK && owner != new->pipe)) {
dev_WARN(dev, "window %u owned by head %u\n", index, owner); return -EBUSY;
}
/* * This seems to happen whenever the head has been disabled with one * or more windows being active. This is harmless because we'll just * reassign the window to the new head anyway.
*/ if (old && owner == OWNER_MASK)
dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
old->pipe, owner);
value &= ~OWNER_MASK;
if (new)
value |= OWNER(new->pipe); else
value |= OWNER_MASK;
if (!tegra_dc_owns_shared_plane(dc, plane)) {
err = tegra_shared_plane_set_owner(plane, dc); if (err < 0) return;
}
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
value |= MODE_FOUR_LINES;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
value = SLOTS(1);
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
/* disable watermark */
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
value &= ~LATENCY_CTL_MODE_ENABLE;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
value |= WATERMARK_MASK;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
/* pipe meter */
value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
/* mempool entries */
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
value = MEMPOOL_ENTRIES(0x331);
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
value &= ~THREAD_NUM_MASK;
value |= THREAD_NUM(plane->base.index);
value |= THREAD_GROUP_ENABLE;
tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
err = tegra_fb_get_tiling(new_plane_state->fb, tiling); if (err < 0) return err;
if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
!dc->soc->supports_block_linear) {
DRM_ERROR("hardware doesn't support block linear mode\n"); return -EINVAL;
}
if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
!dc->soc->supports_sector_layout) {
DRM_ERROR("hardware doesn't support GPU sector layout\n"); return -EINVAL;
}
/* * Tegra doesn't support different strides for U and V planes so we * error out if the user tries to display a framebuffer with such a * configuration.
*/ if (new_plane_state->fb->format->num_planes > 2) { if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
DRM_ERROR("unsupported UV-plane configuration\n"); return -EINVAL;
}
}
/* XXX scaling is not yet supported, add a check here */
err = tegra_plane_state_add(&tegra->base, new_plane_state); if (err < 0) return err;
/* rien ne va plus */ if (!old_state || !old_state->crtc) return;
dc = to_tegra_dc(old_state->crtc);
err = host1x_client_resume(&dc->client); if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err); return;
}
/* * XXX Legacy helpers seem to sometimes call ->atomic_disable() even * on planes that are already disabled. Make sure we fallback to the * head for this particular state instead of crashing.
*/ if (WARN_ON(p->dc == NULL))
p->dc = dc;
value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
tegra_dc_remove_shared_plane(dc, p);
host1x_client_suspend(&dc->client);
}
staticinline u32 compute_phase_incr(fixed20_12 in, unsignedint out)
{
u64 tmp, tmp1;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT /* * Physical address bit 39 in Tegra194 is used as a switch for special * logic that swizzles the memory using either the legacy Tegra or the * dGPU sector layout.
*/ if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
addr_flag = BIT_ULL(39); #endif
base = tegra_plane_state->iova[0] + fb->offsets[0];
base |= addr_flag;
hub_state = tegra_display_hub_get_state(tegra->hub, state); if (IS_ERR(hub_state)) return PTR_ERR(hub_state);
/* * The display hub display clock needs to be fed by the display clock * with the highest frequency to ensure proper functioning of all the * displays. * * Note that this isn't used before Tegra186, but it doesn't hurt and * conditionalizing it would make the code less clean.
*/
for_each_oldnew_crtc_in_state(state, crtc, old, new, i) { struct tegra_dc_state *dc = to_dc_state(new);
if (hub_state->clk) {
err = clk_set_rate(hub_state->clk, hub_state->rate); if (err < 0)
dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
hub_state->clk, hub_state->rate);
err = clk_set_parent(hub->clk_disp, hub_state->clk); if (err < 0)
dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
hub->clk_disp, hub_state->clk, err);
}
if (hub_state->dc)
tegra_display_hub_update(hub_state->dc);
}
hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
GFP_KERNEL); if (!hub->clk_heads) return -ENOMEM;
for (i = 0; i < hub->num_heads; i++) {
child = of_get_next_child(pdev->dev.of_node, child); if (!child) {
dev_err(&pdev->dev, "failed to find node for head %u\n",
i); return -ENODEV;
}
clk = devm_get_clk_from_child(&pdev->dev, child, "dc"); if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock for head %u\n",
i);
of_node_put(child); return PTR_ERR(clk);
}
hub->clk_heads[i] = clk;
}
of_node_put(child);
/* XXX: enable clock across reset? */
err = reset_control_assert(hub->rst); if (err < 0) return err;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.