/* * The following helper functions, despite being named for bigjoiner, * are applicable to both bigjoiner and uncompressed joiner configurations.
*/ staticbool is_bigjoiner(conststruct intel_crtc_state *crtc_state)
{ return hweight8(crtc_state->joiner_pipes) >= 2;
}
static u8 bigjoiner_primary_pipes(conststruct intel_crtc_state *crtc_state)
{ if (!is_bigjoiner(crtc_state)) return 0;
/* * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or * any other logic, so lets just add helper function to * at least hide this hassle..
*/ static u8 ultrajoiner_enable_pipes(conststruct intel_crtc_state *crtc_state)
{ if (!intel_crtc_is_ultrajoiner(crtc_state)) return 0;
if (DISPLAY_VER(display) >= 4) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */ if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder),
TRANSCONF_STATE_ENABLE, 100))
drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
}
/* * A pipe without a PLL won't actually be able to drive bits from * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check.
*/ if (HAS_GMCH(display)) { if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(display); else
assert_pll_enabled(display, pipe);
} else { if (new_crtc_state->has_pch_encoder) { /* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(display,
intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(display,
(enum pipe) cpu_transcoder);
} /* FIXME: assert CPU port conditions for SNB+ */
}
val = intel_de_read(display, TRANSCONF(display, cpu_transcoder)); if (val & TRANSCONF_ENABLE) { /* we keep both pipes enabled on 830 */
drm_WARN_ON(display->drm, !display->platform.i830); return;
}
/* Wa_1409098942:adlp+ */ if (DISPLAY_VER(display) >= 13 &&
new_crtc_state->dsc.compression_enable) {
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
TRANSCONF_PIXEL_COUNT_SCALING_X4);
}
intel_de_write(display, TRANSCONF(display, cpu_transcoder),
val | TRANSCONF_ENABLE);
intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
/* * Until the pipe starts PIPEDSL reads will return a stale value, * which causes an apparent vblank timestamp jump when PIPEDSL * resets to its proper value. That also messes up the frame count * when it's derived from the timestamps. So let's wait for the * pipe to start properly before we call drm_crtc_vblank_on()
*/ if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
intel_wait_for_pipe_scanline_moving(crtc);
}
/* * Make sure planes won't keep trying to pump pixels to us, * or we might hang the display.
*/
assert_planes_disabled(crtc);
val = intel_de_read(display, TRANSCONF(display, cpu_transcoder)); if ((val & TRANSCONF_ENABLE) == 0) return;
/* * Double wide has implications for planes * so best keep it disabled when not needed.
*/ if (old_crtc_state->double_wide)
val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */ if (!display->platform.i830)
val &= ~TRANSCONF_ENABLE;
/* Wa_1409098942:adlp+ */ if (DISPLAY_VER(display) >= 13 &&
old_crtc_state->dsc.compression_enable)
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
/* * We assume the primary plane for pipe A has * the highest stride limits of them all, * if in case pipe A is disabled, use the first pipe from pipe_mask.
*/
crtc = intel_first_crtc(display); if (!crtc) return 0;
/* * Active_planes aliases if multiple "primary" or cursor planes * have been used on the same (or wrong) pipe. plane_mask uses * unique ids, hence we can use that to reconstruct active_planes.
*/
crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
/* * Vblank time updates from the shadow to live plane control register * are blocked if the memory self-refresh mode is active at that * moment. So to make sure the plane gets truly disabled, disable * first the self-refresh mode. The self-refresh enable bit in turn * will be checked/applied by the HW only at the next frame start * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe.
*/ if (HAS_GMCH(display) &&
intel_set_memory_cxsr(display, false))
intel_plane_initial_vblank_wait(crtc);
/* * Gen2 reports pipe underruns whenever all planes are disabled. * So disable underrun reporting before all the planes get disabled.
*/ if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);
/* * Display WA #1153: icl * enable hardware to bypass the alpha math * and rounding for per-pixel values 00 and 0xff
*/
tmp |= PER_PIXEL_ALPHA_BYPASS_EN; /* * Display WA # 1605353570: icl * Set the pixel rounding bit to 1 for allowing * passthrough of Frame buffer pixels unmodified * across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
/* * Underrun recovery must always be disabled on display 13+. * DG2 chicken bit meaning is inverted compared to other platforms.
*/ if (display->platform.dg2)
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; elseif ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */ if (display->platform.dg2)
tmp |= DG2_RENDER_CCSTAG_4_3_EN;
/* * Finds the encoder associated with the given CRTC. This can only be * used when we know that the CRTC isn't feeding multiple encoders!
*/ struct intel_encoder *
intel_get_crtc_new_encoder(conststruct intel_atomic_state *state, conststruct intel_crtc_state *crtc_state)
{ conststruct drm_connector_state *connector_state; conststruct drm_connector *connector; struct intel_encoder *encoder = NULL; struct intel_crtc *primary_crtc; int num_encoders = 0; int i;
primary_crtc = intel_primary_crtc(crtc_state);
for_each_new_connector_in_state(&state->base, connector, connector_state, i) { if (connector_state->crtc != &primary_crtc->base) continue;
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { if (plane->need_async_flip_toggle_wa &&
plane->pipe == crtc->pipe &&
disable_async_flip_planes & BIT(plane->id)) { /* * Apart from the async flip bit we want to * preserve the old state for the plane.
*/
intel_plane_async_flip(NULL, plane,
old_crtc_state, old_plane_state, false);
need_vbl_wait = true;
}
}
if (need_vbl_wait)
intel_crtc_wait_for_next_vblank(crtc);
}
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
}
if (audio_disabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_disable(state, crtc);
intel_drrs_deactivate(old_crtc_state);
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
if (intel_fbc_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
if (!needs_async_flip_vtd_wa(old_crtc_state) &&
needs_async_flip_vtd_wa(new_crtc_state))
intel_async_flip_vtd_wa(display, pipe, true);
/* Display WA 827 */ if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
skl_wa_827(display, pipe, true);
/* Wa_2006604312:icl,ehl */ if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(display, pipe, true);
/* Wa_1604331009:icl,jsl,ehl */ if (!needs_cursorclk_wa(old_crtc_state) &&
needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(display, pipe, true);
/* * Vblank time updates from the shadow to live plane control register * are blocked if the memory self-refresh mode is active at that * moment. So to make sure the plane gets truly disabled, disable * first the self-refresh mode. The self-refresh enable bit in turn * will be checked/applied by the HW only at the next frame start * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe.
*/ if (HAS_GMCH(display) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/* * IVB workaround: must disable low power watermarks for at least * one frame before enabling scaling. LP watermarks can be re-enabled * when scaling is disabled. * * WaCxSRDisabledForSpriteScaling:ivb
*/ if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/* * If we're doing a modeset we don't need to do any * pre-vblank watermark programming here.
*/ if (!intel_crtc_needs_modeset(new_crtc_state)) { /* * For platforms that support atomic watermarks, program the * 'intermediate' watermarks immediately. On pre-gen9 platforms, these * will be the intermediate values that are safe for both pre- and * post- vblank; when vblank happens, the 'active' values will be set * to the final 'target' values and we'll do this again to get the * optimal watermarks. For gen9+ platforms, the values we program here * will be the final target values which will get automatically latched * at vblank time; no further programming will be necessary. * * If a platform hasn't been transitioned to atomic watermarks yet, * we'll continue to update watermarks the old way, if flags tell * us to.
*/ if (!intel_initial_watermarks(state, crtc)) if (new_crtc_state->update_wm_pre)
intel_update_watermarks(display);
}
/* * Gen2 reports pipe underruns whenever all planes are disabled. * So disable underrun reporting before all the planes get disabled. * * We do this after .initial_watermarks() so that we have a * chance of catching underruns with the intermediate watermarks * vs. the old plane configuration.
*/ if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
/* * WA for platforms where async address update enable bit * is double buffered and only latched at start of vblank.
*/ if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
intel_crtc_async_flip_disable_wa(state, crtc);
}
/* * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/ if (display->dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state)) continue;
if (drm_WARN_ON(display->drm, crtc->active)) return;
/* * Sometimes spurious CPU pipe underruns happen during FDI * training, at least with VGA+HDMI cloning. Suppress them. * * On ILK we get an occasional spurious CPU pipe underruns * between eDP port A enable and vdd enable. Also PCH port * enable seems to result in the occasional CPU pipe underrun. * * Spurious PCH underruns also occur during PCH enabling.
*/
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
intel_set_pch_fifo_underrun_reporting(display, pipe, false);
if (new_crtc_state->has_pch_encoder)
ilk_pch_enable(state, crtc);
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(display))
intel_wait_for_pipe_scanline_moving(crtc);
/* * Must wait for vblank to avoid spurious PCH FIFO underruns. * And a second vblank wait is needed at least on ILK with * some interlaced HDMI modes. Let's do the double wait always * in case there are more corner cases we don't know about.
*/ if (new_crtc_state->has_pch_encoder) {
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
}
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}
if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
intel_crtc_wait_for_next_vblank(pipe_crtc);
glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
}
/* * If we change the relative order between pipe/planes * enabling, we need to change the workaround.
*/
hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) { struct intel_crtc *wa_crtc =
intel_crtc_for_pipe(display, hsw_workaround_pipe);
/* * Sometimes spurious CPU pipe underruns happen when the * pipe is already disabled, but FDI RX/TX is still enabled. * Happens at least with VGA+HDMI cloning. Suppress them.
*/
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
intel_set_pch_fifo_underrun_reporting(display, pipe, false);
intel_encoders_disable(state, crtc);
intel_crtc_vblank_off(old_crtc_state);
intel_disable_transcoder(old_crtc_state);
ilk_pfit_disable(old_crtc_state);
if (old_crtc_state->has_pch_encoder)
ilk_pch_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
if (old_crtc_state->has_pch_encoder)
ilk_pch_post_disable(state, crtc);
/* * FIXME collapse everything to one hook. * Need care with mst->ddi interactions.
*/
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
/* Prefer intel_encoder_is_combo() */ bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{ if (phy == PHY_NONE) returnfalse; elseif (display->platform.alderlake_s) return phy <= PHY_E; elseif (display->platform.dg1 || display->platform.rocketlake) return phy <= PHY_D; elseif (display->platform.jasperlake || display->platform.elkhartlake) return phy <= PHY_C; elseif (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12)) return phy <= PHY_B; else /* * DG2 outputs labelled as "combo PHY" in the bspec use * SNPS PHYs with completely different programming, * hence we always return false here.
*/ returnfalse;
}
/* Prefer intel_encoder_is_tc() */ bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{ /* * Discrete GPU phy's are not attached to FIA's to support TC * subsystem Legacy or non-legacy, and only support native DP/HDMI
*/ if (display->platform.dgfx) returnfalse;
/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct intel_display *display, enum phy phy)
{
	/*
	 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
	 * (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
	 */
	return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E;
}
/* Prefer intel_encoder_to_phy() */ enum phy intel_port_to_phy(struct intel_display *display, enum port port)
{ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) return PHY_D + port - PORT_D_XELPD; elseif (DISPLAY_VER(display) >= 13 && port >= PORT_TC1) return PHY_F + port - PORT_TC1; elseif (display->platform.alderlake_s && port >= PORT_TC1) return PHY_B + port - PORT_TC1; elseif ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1) return PHY_C + port - PORT_TC1; elseif ((display->platform.jasperlake || display->platform.elkhartlake) &&
port == PORT_D) return PHY_A;
return PHY_A + port - PORT_A;
}
/* Prefer intel_encoder_to_tc() */
enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
	/* Non-Type-C PHYs have no TC port. */
	if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
		return TC_PORT_NONE;

	/* TC port numbering base differs: PORT_TC1 on gen12+, PORT_C before. */
	if (DISPLAY_VER(display) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}
/* * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe.
*/ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
intel_encoders_disable(state, crtc);
intel_crtc_vblank_off(old_crtc_state);
intel_disable_transcoder(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (display->platform.cherryview)
chv_disable_pll(display, pipe); elseif (display->platform.valleyview)
vlv_disable_pll(display, pipe); else
i9xx_disable_pll(old_crtc_state);
}
intel_encoders_post_pll_disable(state, crtc);
if (DISPLAY_VER(display) != 2)
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
if (!display->funcs.wm->initial_watermarks)
intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */ if (display->platform.i830)
i830_enable_pipe(display, pipe);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.