/* SPDX-License-Identifier: MIT */ /* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD *
*/
if (dc->debug.enable_mem_low_power.bits.dmcu) { // Force ERAM to shutdown if DMCU is not enabled if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
}
} /*dcn35 has default MEM_PWR enabled, make sure wake them up*/ // Set default OPTC memory power states if (dc->debug.enable_mem_low_power.bits.optc) { // Shutdown when unassigned and light sleep in VBLANK
REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
}
if (dc->debug.enable_mem_low_power.bits.vga) { // Power down VGA memory
REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
}
if (dc->debug.enable_mem_low_power.bits.mpc &&
dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);
if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { // Power down VPGs for (i = 0; i < dc->res_pool->stream_enc_count; i++)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); #ifdefined(CONFIG_DRM_AMD_DC_DP2_0) for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); #endif
}
} #endif
staticvoid print_pg_status(struct dc *dc, constchar *debug_func, constchar *debug_log)
{ if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) { if (dc->res_pool->pg_cntl->funcs->print_pg_status)
dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
}
}
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else { // Not all ASICs have DCCG sw component
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
for (i = 0; i < dc->link_count; i++) { /* Power up AND update implementation according to the * required signal (which may be different from the * default signal on connector).
*/ struct dc_link *link = dc->links[i];
if (link->ep_type != DISPLAY_ENDPOINT_PHY) continue;
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */ if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true; if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
}
}
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. * Otherwise, if taking control is not possible, we need to power * everything down.
*/ if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
// we want to turn off edp displays if odm is enabled and no seamless boot if (!dc->caps.seamless_odm) { for (i = 0; i < dc->res_pool->timing_generator_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i];
uint32_t num_opps, opp_id_src0, opp_id_src1;
num_opps = 1; if (tg) { if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
tg->funcs->get_optc_source(tg, &num_opps,
&opp_id_src0, &opp_id_src1);
}
}
if (num_opps > 1) {
dc->link_srv->blank_all_edp_displays(dc); break;
}
}
}
hws->funcs.init_pipes(dc, dc->current_state);
print_pg_status(dc, __func__, ": after init_pipes");
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
} for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i];
if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
}
} if (dc->ctx->dmub_srv) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL)
abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
// Set i2c to light sleep until engine is setup if (dc->debug.enable_mem_low_power.bits.i2c)
REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);
if (hws->funcs.setup_hpo_hw_control)
hws->funcs.setup_hpo_hw_control(hws, false);
if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (dc->debug.disable_mem_low_power) {
REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
} if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc); // Get DMCUB capabilities if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
}
if (dc->res_pool->pg_cntl) { if (dc->res_pool->pg_cntl->funcs->init_pg_status)
dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
}
print_pg_status(dc, __func__, ": after init_pg_status");
}
// Given any pipe_ctx, return the total ODM combine factor, and optionally return // the OPPids which are used staticunsignedint get_odm_config(struct pipe_ctx *pipe_ctx, unsignedint *opp_instances)
{ unsignedint opp_count = 1; struct pipe_ctx *odm_pipe;
// First get to the top pipe for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
;
// First pipe is always used if (opp_instances)
opp_instances[0] = odm_pipe->stream_res.opp->inst;
// Find and count odm pipes, if any for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { if (opp_instances)
opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
opp_count++;
}
return opp_count;
}
void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{ struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false); int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true); struct mpc *mpc = dc->res_pool->mpc; int i;
/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
current_pipe_ctx->next_odm_pipe->stream_res.dsc) { struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc; /* disconnect DSC block from stream */
dsc->funcs->dsc_disconnect(dsc);
}
}
}
if (hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating) {
hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating(
hws->ctx->dc->res_pool->dccg, phy_inst, clock_on);
}
}
/* In headless boot cases, DIG may be turned * on which causes HW/SW discrepancies. * To avoid this, power down hardware on boot * if DIG is turned on
*/ void dcn35_power_down_on_boot(struct dc *dc)
{ struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link = NULL; int edp_num; int i = 0;
dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num)
edp_link = edp_links[0];
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
dc->hwseq->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
dc->hwseq->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else { for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i];
/* * Call update_clocks with empty context * to send DISPLAY_OFF * Otherwise DISPLAY_OFF may not be asserted
*/ if (dc->clk_mgr->funcs->set_low_power_state)
dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
dc_allow_idle_optimizations(dc, true);
}
/*
 * dcn35_apply_idle_power_optimizations() - Gate entry into idle power
 * optimizations (IPS) and forward the request to DMUB.
 * @dc:     DC context.
 * @enable: true to request idle optimizations, false to exit them.
 *
 * When enabling, walks the current streams and refuses (returns false) if
 * any active stream is an external display, is not on PWRSEQ0, lacks both
 * PSR and Replay support, or if more than one eDP is active. Returns true
 * once the request has been forwarded to DMUB (or immediately under DMCUB
 * emulation).
 */
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	if (dc->debug.dmcub_emulation)
		return true;

	if (enable) {
		uint32_t num_active_edp = 0;
		int i;

		for (i = 0; i < dc->current_state->stream_count; ++i) {
			struct dc_stream_state *stream = dc->current_state->streams[i];
			struct dc_link *link = stream->link;
			bool is_psr = link && !link->panel_config.psr.disable_psr &&
				      (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
				       link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
			bool is_replay = link && link->replay_settings.replay_feature_enabled;

			/* Ignore streams that are disabled. */
			if (stream->dpms_off)
				continue;

			/* Active external displays block idle optimizations. */
			if (!dc_is_embedded_signal(stream->signal))
				return false;

			/* If not PWRSEQ0, we can't enter idle optimizations. */
			if (link && link->link_index != 0)
				return false;

			/* Check for panel power features required for idle optimizations. */
			if (!is_psr && !is_replay)
				return false;

			num_active_edp += 1;
		}

		/* If more than one active eDP then disallow. */
		if (num_active_edp > 1)
			return false;
	}

	// TODO: review other cases when idle optimization is allowed
	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);

	return true;
}
void dcn35_z10_restore(conststruct dc *dc)
{ if (dc->debug.disable_z10) return;
for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) {
can_apply_seamless_boot = true; break;
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* There is assumption that pipe_ctx is not mapping irregularly * to non-preferred front end. If pipe_ctx->stream is not NULL, * we will use the pipe, so don't disable
*/ if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue;
/* Blank controller using driver code instead of * command table.
*/ if (tg->funcs->is_tg_enabled(tg)) { if (hws->funcs.init_blank != NULL) {
hws->funcs.init_blank(dc, tg);
tg->funcs->lock(tg);
} else {
tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
}
}
/* Reset det size */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = dc->res_pool->hubps[i];
/* Do not need to reset for seamless boot */ if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue;
if (hubbub && hubp) { if (hubbub->funcs->program_det_size)
hubbub->funcs->program_det_size(hubbub, hubp->inst, 0); if (hubbub->funcs->program_det_segments)
hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
}
}
/* num_opp will be equal to number of mpcc */ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Cannot reset the MPC mux if seamless boot */ if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue;
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; struct hubp *hubp = dc->res_pool->hubps[i]; struct dpp *dpp = dc->res_pool->dpps[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* There is assumption that pipe_ctx is not mapping irregularly * to non-preferred front end. If pipe_ctx->stream is not NULL, * we will use the pipe, so don't disable
*/ if (can_apply_seamless_boot &&
pipe_ctx->stream != NULL &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
pipe_ctx->stream_res.tg)) { // Enable double buffering for OTG_BLANK no matter if // seamless boot is enabled or not to suppress global sync // signals when OTG blanked. This is to prevent pipe from // requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
tg_enabled[i] = true; continue;
}
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (tg->funcs->is_tg_enabled(tg)) { if (tg->funcs->init_odm)
tg->funcs->init_odm(tg);
}
tg->funcs->tg_init(tg);
}
/* Clean up MPC tree */ for (i = 0; i < dc->res_pool->pipe_count; i++) { if (tg_enabled[i]) { if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
// Step 1: To find out which OPTC is running & OPTC DSC is ON // We can't use res_pool->res_cap->num_timing_generator to check // Because it records display pipes default setting built in driver, // not display pipes of the current chip. // Some ASICs would be fused display pipes less than the default setting. // In dcnxx_resource_construct function, driver would obatin real information. for (i = 0; i < dc->res_pool->timing_generator_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg)) { if (tg->funcs->get_dsc_status)
tg->funcs->get_dsc_status(tg, &optc_dsc_state); // Only one OPTC with DSC is ON, so if we got one result, // we would exit this block. non-zero value is DSC enabled if (optc_dsc_state != 0) {
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); break;
}
}
}
// Step 2: To power down DSC but skip DSC of running OPTC for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { struct dcn_dsc_state s = {0};
/* avoid reading DSC state when it is not in use as it may be power gated */ if (optc_dsc_state) {
dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
/* In flip immediate with pipe splitting case GSL is used for * synchronization so we must disable it when the plane is disabled.
*/ if (pipe_ctx->stream_res.gsl_group != 0)
dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false); /* if (hubp->funcs->hubp_update_mall_sel) hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
*/
dc->hwss.set_flip_control_gsl(pipe_ctx, false);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) return;
if (hws->funcs.plane_atomic_disable)
hws->funcs.plane_atomic_disable(dc, pipe_ctx);
/* Turn back off the phantom OTG after the phantom plane is fully disabled
*/ if (is_phantom) if (tg && tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}
void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state)
{ bool hpo_frl_stream_enc_acquired = false; bool hpo_dp_stream_enc_acquired = false; int i = 0, j = 0; int edp_num = 0; struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) { if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
dc->res_pool->hpo_dp_stream_enc[i]) {
hpo_dp_stream_enc_acquired = true; break;
}
}
if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
update_state->pg_res_update[PG_DWB] = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_res.hubp)
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;
if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
if (pipe_ctx->stream_res.dsc) {
update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false; if (dc->caps.sequential_ono) {
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) { for (j = 0; j < dc->res_pool->pipe_count; ++j) {
update_state->pg_pipe_res_update[PG_HUBP][j] = false;
update_state->pg_pipe_res_update[PG_DPP][j] = false;
}
}
}
}
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
if (pipe_ctx->stream_res.hpo_dp_stream_enc)
update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
}
for (i = 0; i < dc->link_count; i++) {
update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true; if (dc->links[i]->type != dc_connection_none)
update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = false;
}
/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/ for (i = 0; i < dc->res_pool->timing_generator_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; if (tg && tg->funcs->is_tg_enabled(tg)) {
update_state->pg_pipe_res_update[PG_OPTC][i] = false; break;
}
}
dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num == 0 ||
((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
(!edp_links[1] || !edp_links[1]->edp_sink_present))) { /*eDP not exist on this config, keep Domain24 power on, for S0i3, this will be handled in dmubfw*/
update_state->pg_pipe_res_update[PG_OPTC][0] = false;
}
if (dc->caps.sequential_ono) { for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
!update_state->pg_pipe_res_update[PG_DPP][i]) { for (j = i - 1; j >= 0; j--) {
update_state->pg_pipe_res_update[PG_HUBP][j] = false;
update_state->pg_pipe_res_update[PG_DPP][j] = false;
}
break;
}
}
}
}
/*
 * dcn35_calc_blocks_to_ungate() - Mark which HW blocks must be ungated for
 * the new state @context (PHYSYMCLK for connected links, HPO/HDMISTREAM for
 * acquired stream encoders, and HUBP/DPP under sequential ONO constraints).
 *
 * NOTE(review): this chunk appears truncated by extraction — braces do not
 * balance (one extra '}' at the end), which suggests the trailing descending
 * HUBP/DPP loop originally sat inside the sequential_ono branch, and that a
 * per-pipe initialization/comparison pass before the link loop was elided.
 * Restore from the full file before relying on this body.
 * NOTE(review): new_pipe->stream_res.dsc is dereferenced below without a
 * visible NULL check — presumably a guard was lost in the line merge; verify.
 */
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state)
{ bool hpo_frl_stream_enc_acquired = false; bool hpo_dp_stream_enc_acquired = false; int i = 0, j = 0;
/* Connected links need their PHYSYMCLK ungated. */
for (i = 0; i < dc->link_count; i++) if (dc->links[i]->type != dc_connection_none)
update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) { if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
dc->res_pool->hpo_dp_stream_enc[i]) {
hpo_dp_stream_enc_acquired = true; break;
}
}
if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
if (hpo_frl_stream_enc_acquired)
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
if (dc->caps.sequential_ono) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ if (new_pipe->plane_res.hubp &&
new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) { for (j = 0; j < dc->res_pool->pipe_count; ++j) {
update_state->pg_pipe_res_update[PG_HUBP][j] = true;
update_state->pg_pipe_res_update[PG_DPP][j] = true;
}
}
}
}
/* Ungate every HUBP/DPP pair below the highest pair being ungated. */
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) { for (j = i - 1; j >= 0; j--) {
update_state->pg_pipe_res_update[PG_HUBP][j] = true;
update_state->pg_pipe_res_update[PG_DPP][j] = true;
}
break;
}
}
}
}
/**
 * dcn35_hw_block_power_down() - power down sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power down:
 *
 *	ONO Region 3, DCPG 25: hpo - SKIPPED
 *	ONO Region 4, DCPG 0: dchubp0, dpp0
 *	ONO Region 6, DCPG 1: dchubp1, dpp1
 *	ONO Region 8, DCPG 2: dchubp2, dpp2
 *	ONO Region 10, DCPG 3: dchubp3, dpp3
 *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
 *	ONO Region 5, DCPG 16: dsc0
 *	ONO Region 7, DCPG 17: dsc1
 *	ONO Region 9, DCPG 18: dsc2
 *	ONO Region 11, DCPG 19: dsc3
 *	ONO Region 2, DCPG 24: mpc opp optc dwb
 *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
 *
 * If sequential ONO is specified the order is modified from ONO Region 11 -> ONO Region 0 descending.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_down(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;

	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
	}

	if (!dc->caps.sequential_ono) {
		/* HUBP/DPP pairs first, then the DSC instances. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}

		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}
		}
	} else {
		/* Sequential ONO: descending order, DSC before its HUBP/DPP pair. */
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}

			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}
	}

	/* This will need all the clients to unregister optc interrupts; let dmubfw handle this. */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);

	/* Domains 22, 23, 25 currently always on. */
}
/**
 * dcn35_hw_block_power_up() - power up sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power up:
 *
 *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
 *	ONO Region 2, DCPG 24: mpc opp optc dwb
 *	ONO Region 5, DCPG 16: dsc0
 *	ONO Region 7, DCPG 17: dsc1
 *	ONO Region 9, DCPG 18: dsc2
 *	ONO Region 11, DCPG 19: dsc3
 *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
 *	ONO Region 4, DCPG 0: dchubp0, dpp0
 *	ONO Region 6, DCPG 1: dchubp1, dpp1
 *	ONO Region 8, DCPG 2: dchubp2, dpp2
 *	ONO Region 10, DCPG 3: dchubp3, dpp3
 *	ONO Region 3, DCPG 25: hpo - SKIPPED
 *
 * If sequential ONO is specified the order is modified from ONO Region 0 -> ONO Region 11 ascending.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_up(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;

	//domain22, 23, 25 currently always on.
	/*this will need all the clients to unregister optc interrupts, let dmubfw handle this*/
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
		    update_state->pg_pipe_res_update[PG_DPP][i]) {
			if (pg_cntl->funcs->hubp_dpp_pg_control)
				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
		}

		/* Sequential ONO powers the DSC instance after its HUBP/DPP pair. */
		if (dc->caps.sequential_ono) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
		}
	}

	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
	}
}

/*
 * dcn35_root_clock_control() - Gate or ungate root clocks for the blocks
 * flagged in @update_state.
 * @dc:           DC context.
 * @update_state: PG flags naming the HUBP/DPP, DPSTREAM, PHYSYMCLK and DSC
 *                instances to act on.
 * @power_on:     true to enable root clocks, false to gate them.
 *
 * Ordering: root clocks are enabled BEFORE blocks power up and disabled
 * AFTER blocks power down; the DSC DCCG enable/disable sits in between.
 *
 * NOTE(review): the two closing braces at the end of this function were
 * missing in the reviewed chunk (truncation artifact) and have been
 * restored; confirm against the full file.
 */
void dcn35_root_clock_control(struct dc *dc,
	struct pg_block_update *update_state, bool power_on)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;

	/* Enable root clock first when powering up. */
	if (power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
	}

	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
			if (power_on) {
				if (dc->res_pool->dccg->funcs->enable_dsc)
					dc->res_pool->dccg->funcs->enable_dsc(dc->res_pool->dccg, i);
			} else {
				if (dc->res_pool->dccg->funcs->disable_dsc)
					dc->res_pool->dccg->funcs->disable_dsc(dc->res_pool->dccg, i);
			}
		}
	}

	/* Disable root clock first when powering down. */
	if (!power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
	}
}
if (dc->hwss.calc_blocks_to_ungate) {
dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, true); /*power up required HW block*/ if (dc->hwss.hw_block_power_up)
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
dcn20_prepare_bandwidth(dc, context);
print_pg_status(dc, __func__, ": after rcg and power up");
}
print_pg_status(dc, __func__, ": before rcg and power up");
dcn20_optimize_bandwidth(dc, context);
if (dc->hwss.calc_blocks_to_gate) {
dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state); /*try to power down unused block*/ if (dc->hwss.hw_block_power_down)
dc->hwss.hw_block_power_down(dc, &pg_update_state);
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
}
print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust)
{ int i = 0; struct drr_params params = {0}; // DRR set trigger event mapped to OTG_TRIG_A unsignedint event_triggers = 0x2;//Bit[1]: OTG_TRIG_A // Note DRR trigger events are generated regardless of whether num frames met. unsignedint num_frames = 2;
for (i = 0; i < num_pipes; i++) { /* dc_state_destruct() might null the stream resources, so fetch tg * here first to avoid a race condition. The lifetime of the pointee * itself (the timing_generator object) is not a problem here.
*/ struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) { if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) { struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; struct dc *dc = pipe_ctx[i]->stream->ctx->dc; unsignedint frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
/*
 * should_avoid_empty_tu() - Return true if the DP DIG pixel-rate divider
 * would risk generating empty transfer units over a DPIA tunnel.
 *
 * NOTE(review): this block is badly garbled by line merging. Several
 * statements (the MST early-out and the dynamic-ODM pix-clk early-out)
 * have been swallowed into '//' comments, keywords are fused
 * ("staticbool", "returnfalse", "unsignedint", "conststruct"), and the
 * computation that assigns avg_pix_per_tu_x1000 is missing entirely, so
 * the final return reads an uninitialized value. Restore this function
 * from the full file rather than compiling this fragment.
 */
staticbool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
{ /* Calculate average pixel count per TU, return false if under ~2.00 to * avoid empty TUs. This is only required for DPIA tunneling as empty TUs * are legal to generate for native DP links. Assume TU size 64 as there * is currently no scenario where it's reprogrammed from HW default. * MTPs have no such limitation, so this does not affect MST use cases.
*/ unsignedint pix_clk_mhz; unsignedint symclk_mhz; unsignedint avg_pix_per_tu_x1000; unsignedint tu_size_bytes = 64; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; conststruct dc *dc = pipe_ctx->stream->link->dc;
if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false) returnfalse;
// Not necessary for MST configurations if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) returnfalse;
pix_clk_mhz = timing->pix_clk_100hz / 10000;
// If this is true, can't block due to dynamic ODM if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz) returnfalse;
switch (link_settings->link_rate) { case LINK_RATE_LOW:
symclk_mhz = 162; break; case LINK_RATE_HIGH:
symclk_mhz = 270; break; case LINK_RATE_HIGH2:
symclk_mhz = 540; break; case LINK_RATE_HIGH3:
symclk_mhz = 810; break; default: // We shouldn't be tunneling any other rates, something is wrong
ASSERT(0); returnfalse;
}
// Add small empirically-decided margin to account for potential jitter return (avg_pix_per_tu_x1000 < 2020);
}
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{ struct dc *dc = pipe_ctx->stream->ctx->dc;
if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) returnfalse;
if (should_avoid_empty_tu(pipe_ctx)) returnfalse;
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy) returntrue;
returnfalse;
}
/* * Set powerup to true for every pipe to match pre-OS configuration.
*/ staticvoid dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
{ int i = 0, j = 0;
/*
 * dcn35_hardware_release() - Power up all gated blocks to restore the
 * pre-OS (VBIOS) optimization environment before releasing the hardware.
 * @dc: DC context.
 *
 * Re-uses the hwss hooks and the existing PG & RCG flags to decide the
 * power-up sequence.
 */
void dcn35_hardware_release(struct dc *dc)
{
	struct pg_block_update pg_update_state;

	/* Populate the ungate flags first; without this, pg_update_state
	 * would be read uninitialized by the calls below.
	 */
	dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);

	if (dc->hwss.root_clock_control)
		dc->hwss.root_clock_control(dc, &pg_update_state, true);
	/* Power up the required HW blocks. */
	if (dc->hwss.hw_block_power_up)
		dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.18 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.